entry_point (string, length 1-65) | original_triton_code (string, length 4.5k-619k) | python_code (string, length 208-60.9k) | triton_code (string, length 1.15k-275k) | repo_name (string, length 7-115) | module_name (string, length 1-65) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (sequence, length 1-6) | stars (int64, 0-19.8k) | sha (string, length 40) | repo_link (string, length 72-180) | pytorch_code (string, length 200-4.05k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
Accuracy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bt/cbt3ifwhfqxk6gfux2vr2qvcztv4fen56aagzwb2jnxtnysnfw37.py
# Topologically Sorted Source Nodes: [pred, lab, eq, correct, float_1, truediv, acc], Original ATen: [aten.ge, aten.eq, aten.sum, aten._to_copy, aten.div, aten.mul]
# Source node to ATen node mapping:
# acc => mul
# correct => sum_1
# eq => eq
# float_1 => convert_element_type
# lab => ge_1
# pred => ge
# truediv => div
# Graph fragment:
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg0_1, 0.5), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg1_1, 0.5), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%ge, %ge_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%eq,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convert_element_type, 256), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 100.0), kwargs = {})
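# Eager-mode reading of the fragment above (an explanatory sketch, not part of
# the generated output):
#   acc = ((arg0_1 >= 0.5) == (arg1_1 >= 0.5)).sum().float() / 256 * 100.0
# where 256 is the element count of the (4, 4, 4, 4) inputs.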
triton_per_fused__to_copy_div_eq_ge_mul_sum_0 = async_compile.triton('triton_per_fused__to_copy_div_eq_ge_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_div_eq_ge_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_div_eq_ge_mul_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 0.5
tmp2 = tmp0 >= tmp1
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 == tmp4
tmp6 = tmp5.to(tl.int64)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp9.to(tl.float32)
    tmp11 = 0.00390625  # 1 / 256, i.e. division by the element count
tmp12 = tmp10 * tmp11
tmp13 = 100.0
tmp14 = tmp12 * tmp13
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [pred, lab, eq, correct, float_1, truediv, acc], Original ATen: [aten.ge, aten.eq, aten.sum, aten._to_copy, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_div_eq_ge_mul_sum_0.run(arg0_1, arg1_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch import Tensor
class Accuracy(Module):
"""
Class for calculating the accuracy for a given prediction and the labels
for comparison.
Expects the inputs to be from a range of 0 to 1 and sets a crossing threshold at 0.5
the labels are similarly rounded.
"""
    def forward(self, pred: 'Tensor', lab: 'Tensor') -> Tensor:
        """
        :param pred: the model's prediction to compare with
        :param lab: the labels for the data to compare to
        :return: the calculated accuracy
        """
return Accuracy.calculate(pred, lab)
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
:param pred: the models prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
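# Worked example (a hedged sketch): both tensors are thresholded to booleans
# before comparison, so e.g.
#   Accuracy.calculate(torch.tensor([0.9, 0.1]), torch.tensor([1.0, 0.0]))
# maps both inputs to [True, False] and returns tensor(100.).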
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch import Tensor
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_eq_ge_mul_sum_0(in_ptr0, in_ptr1,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 >= tmp1
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 == tmp4
tmp6 = tmp5.to(tl.int64)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp9.to(tl.float32)
    tmp11 = 0.00390625  # 1 / 256, i.e. division by the element count
tmp12 = tmp10 * tmp11
tmp13 = 100.0
tmp14 = tmp12 * tmp13
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_div_eq_ge_mul_sum_0[grid(1)](arg0_1,
arg1_1, buf1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class AccuracyNew(Module):
"""
Class for calculating the accuracy for a given prediction and the labels
for comparison.
Expects the inputs to be from a range of 0 to 1 and sets a crossing threshold at 0.5
the labels are similarly rounded.
"""
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
:param pred: the models prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| bharadwaj1098/sparseml | Accuracy | false | 6,327 | ["Apache-2.0"] | 1 | b43dc3edc9f7e6cd32368937b7ed3352180abe52 | https://github.com/bharadwaj1098/sparseml/tree/b43dc3edc9f7e6cd32368937b7ed3352180abe52 | from torch.nn import Module
import torch
from torch import Tensor
class Model(Module):
"""
Class for calculating the accuracy for a given prediction and the labels
for comparison.
Expects the inputs to be from a range of 0 to 1 and sets a crossing threshold at 0.5
the labels are similarly rounded.
"""
    def forward(self, pred: 'Tensor', lab: 'Tensor') -> Tensor:
        """
        :param pred: the model's prediction to compare with
        :param lab: the labels for the data to compare to
        :return: the calculated accuracy
        """
        return Model.calculate(pred, lab)
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
:param pred: the models prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Attention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xl/cxlpplg3hmt6k4x6alhg4yn6eq5jppxhzrzdcvvcpbupy7pjgudn.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp]
# Source node to ATen node mapping:
# max_1 => max_1
# out => exp
# score_1 => div
# src_max => clamp_min
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %clamp_min), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
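# Eager-mode reading (a hedged sketch): this kernel computes the restricted_softmax
# numerator exp(score / 2.0 - clamp(score.max(-1, keepdim=True).values, min=0.0)),
# with 2.0 = sqrt(key.size(-1)) for these 4-dim inputs.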
triton_poi_fused_clamp_div_exp_max_sub_0 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fi/cfijnjpiz4ruggqhl6zhj4ujuexfsuzxvpo26muzj4bggik4i5hl.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add
# exp_1 => exp_1
# max_1 => max_1
# score_1 => div
# src_max => clamp_min
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.0, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {})
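# Eager-mode reading (a hedged sketch): this kernel builds the restricted_softmax
# denominator sum(exp(...), -1, keepdim=True) + exp(0.0 - src_max), i.e. the
# margin term with margin = 0.0 already folded in.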
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/h7/ch7xj7aj6agx6frik7qd7tffe2pmrsjjensiwx2hy2md4kde7aj5.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add
# exp_1 => exp_1
# max_1 => max_1
# out_1 => div_1
# score_1 => div
# src_max => clamp_min
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.0, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add), kwargs = {})
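# Eager-mode reading (a hedged sketch): the final step divides the numerator by
# the denominator in place, so out = exp(...) / (sum(exp(...)) + exp(-src_max)).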
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_exp_max_sub_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1.run(buf1, buf0, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2.run(buf3, buf2, 256, grid=grid(256), stream=stream0)
del buf2
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
import torch.utils.data
def restricted_softmax(src, dim: 'int'=-1, margin: 'float'=0.0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0.0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
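# In formula form (a hedged restatement of the function above):
#   out_i = exp(s_i - m) / (sum_j exp(s_j - m) + exp(margin - m)), m = max(max_j s_j, 0)
# i.e. a softmax with an extra implicit logit fixed at `margin`, so the weights
# sum to less than 1 when every score is far below `margin`.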
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value):
return self.compute_attention(query, key, value)
def compute_attention(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
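# Shape sketch (hedged): with query of shape [*, L, d], key [*, S, d] and value
# [*, S, d_v], `score` above is [*, L, S] and the returned tensor is [*, L, d_v].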
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_exp_max_sub_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1[grid(64)](buf1,
buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2[grid(256)](buf3,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
buf4 = buf0
del buf0
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
def restricted_softmax(src, dim: 'int'=-1, margin: 'float'=0.0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0.0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class AttentionNew(torch.nn.Module):
def __init__(self, dropout=0):
super(AttentionNew, self).__init__()
self.dropout = dropout
def compute_attention(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| beneisner/pytorch_geometric | Attention | false | 6,328 | ["MIT"] | 1 | 53d44a96bd2de2753b1ab1d7153c026c92606a81 | https://github.com/beneisner/pytorch_geometric/tree/53d44a96bd2de2753b1ab1d7153c026c92606a81 | import math
import torch
import torch.nn.functional as F
import torch.utils.data
def restricted_softmax(src, dim: 'int'=-1, margin: 'float'=0.0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0.0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class Model(torch.nn.Module):
def __init__(self, dropout=0):
super().__init__()
self.dropout = dropout
def forward(self, query, key, value):
return self.compute_attention(query, key, value)
def compute_attention(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
DentReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ie/ciegv7ufbmpw2w3nvhqfm2mgytwfsb4wkog2xdztcsyyrzl6ihey.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put.default](args = (%arg0_1, [%bitwise_and], %full_default), kwargs = {})
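# Eager-mode reading (a hedged sketch): the fused kernel writes
#   out = torch.where((x >= -0.2) & (x <= 0.0), 0.0, x)
# i.e. it zeroes the "dent" band [-0.2, 0] instead of materializing an index_put.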
triton_poi_fused_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -0.2
tmp2 = tmp0 >= tmp1
tmp3 = 0.0
tmp4 = tmp0 <= tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.where(tmp5, tmp3, tmp0)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DentReLUFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, p):
ctx.save_for_backward(input)
ctx.p = p
output = input.clone()
mask1 = p <= input
mask2 = input <= 0
output[mask1 & mask2] = 0
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
p = ctx.p
grad_input = grad_output.clone()
mask1 = p <= input
mask2 = input <= 0
grad_input[mask1 & mask2] = 0
return grad_input, None
class DentReLU(nn.Module):
def __init__(self, p: 'float'=-0.2):
super(DentReLU, self).__init__()
self.p = p
def forward(self, input):
return DentReLUFunction.apply(input, self.p)
def extra_repr(self):
return 'p={}'.format(self.p)
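# Worked example (a hedged sketch): with the default p = -0.2,
#   DentReLU()(torch.tensor([-0.5, -0.1, 0.3]))
# zeroes only the middle entry, returning tensor([-0.5000, 0.0000, 0.3000]).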
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -0.2
tmp2 = tmp0 >= tmp1
tmp3 = 0.0
tmp4 = tmp0 <= tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.where(tmp5, tmp3, tmp0)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DentReLUFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, p):
ctx.save_for_backward(input)
ctx.p = p
output = input.clone()
mask1 = p <= input
mask2 = input <= 0
output[mask1 & mask2] = 0
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
p = ctx.p
grad_input = grad_output.clone()
mask1 = p <= input
mask2 = input <= 0
grad_input[mask1 & mask2] = 0
return grad_input, None
class DentReLUNew(nn.Module):
def __init__(self, p: 'float'=-0.2):
super(DentReLUNew, self).__init__()
self.p = p
def extra_repr(self):
return 'p={}'.format(self.p)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| bfeng/pytorch-cifar | DentReLU | false | 6,329 | ["MIT"] | 1 | 6de257bb4b489429785502d487044c55bec62aae | https://github.com/bfeng/pytorch-cifar/tree/6de257bb4b489429785502d487044c55bec62aae | import torch
import torch.nn as nn
class DentReLUFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, p):
ctx.save_for_backward(input)
ctx.p = p
output = input.clone()
mask1 = p <= input
mask2 = input <= 0
output[mask1 & mask2] = 0
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
p = ctx.p
grad_input = grad_output.clone()
mask1 = p <= input
mask2 = input <= 0
grad_input[mask1 & mask2] = 0
return grad_input, None
class Model(nn.Module):
def __init__(self, p: 'float'=-0.2):
super().__init__()
self.p = p
def forward(self, input):
return DentReLUFunction.apply(input, self.p)
def extra_repr(self):
return 'p={}'.format(self.p)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LuongAttentionConcat | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jw/cjwwa4uchk246nlrjndt3n65ojf447r3xpsci6idv5lw24heuhdo.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %repeat], 2), kwargs = {})
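# Eager-mode reading (a hedged sketch): the decoder state (primals_1) is repeated
# across the time dimension and concatenated with the encoder values (primals_2)
# along dim 2, producing the (4, 4, 8) input to the concat scoring layer.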
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = (xindex // 8)
x2 = (xindex // 32)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x2) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x4), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gd/cgdtd7uw2iemby2kfb22fx3vkhdbrpyx2y2l6nq45fmox3ad7stv.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qs/cqsyda2m63ct5ijcfgcipyyfn273chi5d3kmpjuf5asa7h4wdpdv.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/52/c52qmrrj3gmyqihsg5345v6p7s7bpkw6x6nyfsjpghq6g7bbu2d3.py
# Topologically Sorted Source Nodes: [context_vector, context_vector_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# context_vector => mul
# context_vector_1 => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
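# Eager-mode reading (a hedged sketch):
#   context_vector = (attention_weights * values).sum(dim=1)
# reducing the weighted (4, 4, 4) values to a (4, 4) context vector.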
triton_poi_fused_mul_sum_4 = async_compile.triton('triton_poi_fused_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
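# A hedged eager sketch (names illustrative) of triton_poi_fused_mul_sum_4:
# the broadcasted multiply and the sequence-dim reduction that form the
# context vector, with the (4, 4, 1) weights and (4, 4, 4) values from call().
import torch
_w, _v = torch.rand(4, 4, 1), torch.rand(4, 4, 4)
_ctx = (_w * _v).sum(dim=1)                        # what the fused kernel stores
assert torch.allclose(_ctx, torch.einsum('btl,btd->bd', _w, _v))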
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 128, grid=grid(128), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_6
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [context_vector, context_vector_1], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_4.run(buf6, primals_2, buf7, 16, grid=grid(16), stream=stream0)
return (buf7, buf6, primals_2, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf6, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LuongAttentionConcat(nn.Module):
def __init__(self, units, hidden_size):
super().__init__()
self.W = nn.Linear(2 * hidden_size, units)
self.V = nn.Linear(units, 1)
def forward(self, query, values):
query = torch.squeeze(query, 0)
query = torch.unsqueeze(query, 1)
query = query.repeat(1, values.shape[1], 1)
cat = torch.cat((values, query), dim=2)
score = self.V(torch.tanh(self.W(cat)))
attention_weights = F.softmax(score, dim=1)
context_vector = attention_weights * values
context_vector = context_vector.sum(1)
return context_vector, attention_weights
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'units': 4, 'hidden_size': 4}]
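# A minimal usage sketch (assumed, not from the source repo) with the shapes
# returned by get_inputs(); output shapes follow from the forward pass above.
import torch
_attn = LuongAttentionConcat(units=4, hidden_size=4)
_ctx, _w = _attn(torch.rand(4, 4), torch.rand(4, 4, 4))
print(_ctx.shape, _w.shape)  # torch.Size([4, 4]) torch.Size([4, 4, 1])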
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x3 = xindex // 8
x2 = xindex // 32
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_6
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_3[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused_mul_sum_4[grid(16)](buf6, primals_2, buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf7, buf6, primals_2, reinterpret_tensor(buf0, (16, 8), (8, 1), 0
), buf2, buf6, primals_5
class LuongAttentionConcatNew(nn.Module):
def __init__(self, units, hidden_size):
super().__init__()
self.W = nn.Linear(2 * hidden_size, units)
self.V = nn.Linear(units, 1)
def forward(self, input_0, input_1):
primals_3 = self.W.weight
primals_4 = self.W.bias
primals_5 = self.V.weight
primals_6 = self.V.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| beroguedou/nmt-pytorch | LuongAttentionConcat | false | 6,330 | [
"MIT"
] | 1 | 8758ba33e2d5f4eca7f1ac2d04582678332bbcd5 | https://github.com/beroguedou/nmt-pytorch/tree/8758ba33e2d5f4eca7f1ac2d04582678332bbcd5 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, units, hidden_size):
super().__init__()
self.W = nn.Linear(2 * hidden_size, units)
self.V = nn.Linear(units, 1)
def forward(self, query, values):
query = torch.squeeze(query, 0)
query = torch.unsqueeze(query, 1)
query = query.repeat(1, values.shape[1], 1)
cat = torch.cat((values, query), dim=2)
score = self.V(torch.tanh(self.W(cat)))
attention_weights = F.softmax(score, dim=1)
context_vector = attention_weights * values
context_vector = context_vector.sum(1)
return context_vector, attention_weights
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BahdanauAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2x/c2x66b6sza3svon43c774fqn45xzpdlajk7fj3gf6dzmp6nxl7jx.py
# Topologically Sorted Source Nodes: [sum_1, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# sum_1 => add
# tanh => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x3 = (xindex // 256)
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(out_ptr0 + (x6), tmp7, xmask)
''', device_str='cuda')
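# A hedged sketch of triton_poi_fused_add_tanh_0: after the two extern mm
# calls, the kernel folds in both Linear biases and applies tanh, i.e. the
# additive score tanh(W1(values) + W2(query)). Tensor names and the broadcast
# shapes below are illustrative, not the kernel's actual buffers.
import torch
_mv = torch.rand(4, 4, 4, 4)      # values @ W1.weight.T (bias not yet added)
_mq = torch.rand(4, 1, 4, 4, 4)   # query @ W2.weight.T, broadcast over dim 1
_b1, _b2 = torch.rand(4), torch.rand(4)
_fused = torch.tanh((_mv + _b1) + (_mq + _b2))     # what the kernel stores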
# kernel path: runs/run_shard_4/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gt/cgt6ickymkk3j6ootc4hsb2blqews2n64wmaey7kzqk6e3v2rb4u.py
# Topologically Sorted Source Nodes: [context_vector, context_vector_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# context_vector => mul
# context_vector_1 => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_4), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, tanh], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(buf0, primals_3, buf1, primals_6, buf2, 1024, grid=grid(1024), stream=stream0)
del primals_3
del primals_6
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [context_vector, context_vector_1], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf6, primals_4, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, buf6, primals_4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2, buf6, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class BahdanauAttention(nn.Module):
def __init__(self, units, hidden_size):
super().__init__()
self.W1 = nn.Linear(hidden_size, units)
self.W2 = nn.Linear(hidden_size, units)
self.V = nn.Linear(units, 1)
def forward(self, query, values):
query = torch.squeeze(query, 0)
hidden_with_time_axis = torch.unsqueeze(query, 1)
sum_1 = self.W1(values) + self.W2(hidden_with_time_axis)
score = self.V(torch.tanh(sum_1))
attention_weights = F.softmax(score, dim=1)
context_vector = attention_weights * values
context_vector = context_vector.sum(1)
return context_vector, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'units': 4, 'hidden_size': 4}]
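# A minimal usage sketch (assumed, not from the source repo) with the shapes
# from get_inputs(); the trailing singleton on the weights comes from the
# V(units -> 1) layer.
import torch
_attn = BahdanauAttention(units=4, hidden_size=4)
_ctx, _w = _attn(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
print(_ctx.shape, _w.shape)  # torch.Size([4, 4, 4, 4]) torch.Size([4, 4, 4, 4, 1])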
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x3 = xindex // 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(out_ptr0 + x6, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_3, buf1,
primals_6, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
del primals_6
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0)
del buf1
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_mul_sum_3[grid(256)](buf6, primals_4, buf7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf7, buf6, primals_4, reinterpret_tensor(primals_1, (64, 4), (4,
1), 0), buf2, buf6, primals_7
class BahdanauAttentionNew(nn.Module):
def __init__(self, units, hidden_size):
super().__init__()
self.W1 = nn.Linear(hidden_size, units)
self.W2 = nn.Linear(hidden_size, units)
self.V = nn.Linear(units, 1)
def forward(self, input_0, input_1):
primals_2 = self.W1.weight
primals_3 = self.W1.bias
primals_5 = self.W2.weight
primals_6 = self.W2.bias
primals_7 = self.V.weight
primals_8 = self.V.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
| beroguedou/nmt-pytorch | BahdanauAttention | false | 6,331 | [
"MIT"
] | 1 | 8758ba33e2d5f4eca7f1ac2d04582678332bbcd5 | https://github.com/beroguedou/nmt-pytorch/tree/8758ba33e2d5f4eca7f1ac2d04582678332bbcd5 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, units, hidden_size):
super().__init__()
self.W1 = nn.Linear(hidden_size, units)
self.W2 = nn.Linear(hidden_size, units)
self.V = nn.Linear(units, 1)
def forward(self, query, values):
query = torch.squeeze(query, 0)
hidden_with_time_axis = torch.unsqueeze(query, 1)
sum_1 = self.W1(values) + self.W2(hidden_with_time_axis)
score = self.V(torch.tanh(sum_1))
attention_weights = F.softmax(score, dim=1)
context_vector = attention_weights * values
context_vector = context_vector.sum(1)
return context_vector, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
RC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
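# A hedged check of the gather arithmetic above: for a 4x4 map padded to 6x6,
# col = 3 - abs(abs(x0 - 1) - 3) mirrors 0..5 onto [1, 0, 1, 2, 3, 2], which
# is exactly ReflectionPad2d(1); rows use the same trick scaled by the row
# stride of 4.
import torch
_x = torch.arange(16.0).view(1, 1, 4, 4)
_ref = torch.nn.ReflectionPad2d(1)(_x)[0, 0]
_flat = _x[0, 0].flatten()
_man = torch.tensor([[_flat[15 - abs(abs(c - 1) - 3) - 4 * abs(abs(r - 1) - 3)].item()
                      for c in range(6)] for r in range(6)])
assert torch.equal(_ref, _man)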
# kernel path: runs/run_shard_4/inductor_cache/ac/cac3fsg2etmtbrojzasevhhi6adzjv3vdon2r6addczf27vwsi2g.py
# Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
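# A hedged sketch of the fused epilogue above: add the conv bias, apply ReLU
# in place, and also store the boolean mask relu(x) <= 0 that
# aten.threshold_backward later uses to zero incoming gradients.
import torch
_y = torch.randn(4, 4, 4, 4)                              # conv output, pre-bias
_out = torch.relu(_y + torch.randn(4).view(1, -1, 1, 1))  # in_out_ptr0 afterwards
_mask = _out <= 0                                         # out_ptr0, read by backward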
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf2, primals_3, buf3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class RC(nn.Module):
"""
    A wrapper class for ReflectionPad2d, Conv2d, and an optional ReLU.
"""
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1,
activation_function=True):
super().__init__()
self.pad = nn.ReflectionPad2d((padding, padding, padding, padding))
self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
self.activation_function = activation_function
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
return F.relu(x) if self.activation_function else x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
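# A minimal usage sketch (assumed, not from the source repo): with padding=1
# and a 3x3 kernel, the reflection pad exactly offsets the conv shrinkage, so
# spatial size is preserved.
import torch
_block = RC(in_dim=4, out_dim=4)
print(_block(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 4, 4])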
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(256)](buf2,
primals_3, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf3
class RCNew(nn.Module):
"""
    A wrapper class for ReflectionPad2d, Conv2d, and an optional ReLU.
"""
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1,
activation_function=True):
super().__init__()
self.pad = nn.ReflectionPad2d((padding, padding, padding, padding))
self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
self.activation_function = activation_function
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| benningtonlee7/AdaIn_Style_Transfer_From_Scratch_In_Pytorch | RC | false | 6,332 | [
"MIT"
] | 1 | 50dfe4bdcbcdd0f4e647f9ee45de2a3f81eb6722 | https://github.com/benningtonlee7/AdaIn_Style_Transfer_From_Scratch_In_Pytorch/tree/50dfe4bdcbcdd0f4e647f9ee45de2a3f81eb6722 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
    A wrapper class for ReflectionPad2d, Conv2d, and an optional ReLU.
"""
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1,
activation_function=True):
super().__init__()
self.pad = nn.ReflectionPad2d((padding, padding, padding, padding))
self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
self.activation_function = activation_function
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
return F.relu(x) if self.activation_function else x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Decoder | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/w6/cw66djixvued3ctgslkkk6ref2q3s26fg5eeufcpvxq57xt5mctq.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_5 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%clone, %clone_1], 3), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y1 = (yindex // 16) % 4
x4 = xindex % 4
x5 = (xindex // 4)
y0 = yindex % 16
y6 = (yindex // 64)
y3 = (yindex // 256)
y7 = (yindex // 16) % 16
tmp0 = y1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + (4*y1) + (8*x5) + (64*y0) + (1024*y6)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1, 1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + (32 + x4 + (4*((-2) + y1)) + (8*x5) + (64*y0) + (1024*y6)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (y0 + (16*x4) + (64*y7) + (1024*x5) + (4096*y3)), tmp10, xmask)
''', device_str='cuda')
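# A hedged eager equivalent of the single gather kernel above, which folds
# Decoder.forward's split / view / permute / cat pipeline (theta=None path)
# into one pass; shapes follow get_inputs() below.
import torch
_x = torch.rand(4, 64, 16, 4)
_halves = [h.view(-1, 4, 4, 4, 4, 2, 4).permute(0, 4, 1, 5, 6, 2, 3).contiguous()
           for h in (_x[:, :, :8], _x[:, :, 8:])]
_out = torch.cat(_halves, 3)   # (4, 4, 4, 4, 4, 4, 4), matches buf0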
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 64, 16, 4), (4096, 64, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 1024, 16, grid=grid(1024, 16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 64, 16, 4), (4096, 64, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
""" Encoder
"""
def __init__(self, n_levels, n_color, n_eccentricity, n_azimuth,
n_theta, n_phase):
super(Decoder, self).__init__()
self.n_levels = n_levels
self.n_color = n_color
self.n_eccentricity = n_eccentricity
self.n_azimuth = n_azimuth
self.n_theta = n_theta
self.n_phase = n_phase
self.h_size = (n_levels * n_color * n_eccentricity * n_azimuth *
n_theta * n_phase)
def forward(self, x, theta=None):
x = x.view(-1, self.n_color * self.n_theta * self.n_phase, self.
n_levels * self.n_eccentricity, self.n_azimuth)
lim = self.n_levels * self.n_eccentricity // 2
x_int = x[:, :, :lim, ...]
x_ext = x[:, :, lim:, ...]
x_list = []
for x in (x_int, x_ext):
if theta is not None:
theta_inv = theta
theta_inv[:, :, 2] = -theta[:, :, 2].detach()
grid = F.affine_grid(theta_inv, x.size())
x = F.grid_sample(x, grid)
x = x.view(-1, self.n_color, self.n_theta, self.n_phase, self.
n_levels, self.n_eccentricity // 2, self.n_azimuth)
x = x.permute(0, 4, 1, 5, 6, 2, 3).contiguous()
x_list.append(x)
x = torch.cat(x_list, 3)
return x
def get_inputs():
return [torch.rand([4, 64, 16, 4])]
def get_init_inputs():
return [[], {'n_levels': 4, 'n_color': 4, 'n_eccentricity': 4,
'n_azimuth': 4, 'n_theta': 4, 'n_phase': 4}]
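# --- Editor-added, hedged usage sketch (not part of the original repo) ---
# Minimal shape check for the view -> split -> permute -> cat pipeline above,
# using the sizes from get_init_inputs (every dimension 4). Each half keeps
# n_eccentricity // 2 = 2 along the concat axis, so the two halves rebuild a
# full (batch, n_levels, n_color, n_eccentricity, n_azimuth, n_theta, n_phase)
# tensor of shape (4, 4, 4, 4, 4, 4, 4).
if __name__ == '__main__':
    dec = Decoder(n_levels=4, n_color=4, n_eccentricity=4, n_azimuth=4,
                  n_theta=4, n_phase=4)
    out = dec(*get_inputs())
    assert out.shape == (4, 4, 4, 4, 4, 4, 4), out.shape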
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y1 = yindex // 16 % 4
x4 = xindex % 4
x5 = xindex // 4
y0 = yindex % 16
y6 = yindex // 64
y3 = yindex // 256
y7 = yindex // 16 % 16
tmp0 = y1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 4 * y1 + 8 * x5 + 64 * y0 + 1024 * y6),
tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1, 1], 4, tl.int64)
tmp9 = tl.load(in_ptr0 + (32 + x4 + 4 * (-2 + y1) + 8 * x5 + 64 * y0 +
1024 * y6), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (y0 + 16 * x4 + 64 * y7 + 1024 * x5 + 4096 * y3),
tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 64, 16, 4), (4096, 64, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256,
64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1024, 16)](arg0_1, buf0, 1024, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DecoderNew(nn.Module):
""" Encoder
"""
def __init__(self, n_levels, n_color, n_eccentricity, n_azimuth,
n_theta, n_phase):
super(DecoderNew, self).__init__()
self.n_levels = n_levels
self.n_color = n_color
self.n_eccentricity = n_eccentricity
self.n_azimuth = n_azimuth
self.n_theta = n_theta
self.n_phase = n_phase
self.h_size = (n_levels * n_color * n_eccentricity * n_azimuth *
n_theta * n_phase)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
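# --- Editor-added, hedged sanity sketch (assumes a CUDA device) ---
# The compiled cat kernel above implements the theta=None path of the eager
# Decoder earlier in this record; a minimal eager reference for the default
# sizes (all 4) lets us check that the two agree.
def _eager_reference(x):
    x = x.view(-1, 64, 16, 4)
    halves = []
    for h in (x[:, :, :8], x[:, :, 8:]):
        h = h.view(-1, 4, 4, 4, 4, 2, 4).permute(0, 4, 1, 5, 6, 2, 3).contiguous()
        halves.append(h)
    return torch.cat(halves, 3)
if torch.cuda.is_available():
    inp = torch.rand(4, 64, 16, 4, device='cuda')
    torch.testing.assert_close(DecoderNew(4, 4, 4, 4, 4, 4)(inp), _eager_reference(inp))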
| bicv/POLO | Decoder | false | 6,333 | [
"MIT"
] | 1 | b8d4f9014796a4eb24c178d8be611a0b3b4c44df | https://github.com/bicv/POLO/tree/b8d4f9014796a4eb24c178d8be611a0b3b4c44df | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
""" Encoder
"""
def __init__(self, n_levels, n_color, n_eccentricity, n_azimuth,
n_theta, n_phase):
super().__init__()
self.n_levels = n_levels
self.n_color = n_color
self.n_eccentricity = n_eccentricity
self.n_azimuth = n_azimuth
self.n_theta = n_theta
self.n_phase = n_phase
self.h_size = (n_levels * n_color * n_eccentricity * n_azimuth *
n_theta * n_phase)
def forward(self, x, theta=None):
x = x.view(-1, self.n_color * self.n_theta * self.n_phase, self.
n_levels * self.n_eccentricity, self.n_azimuth)
lim = self.n_levels * self.n_eccentricity // 2
x_int = x[:, :, :lim, ...]
x_ext = x[:, :, lim:, ...]
x_list = []
for x in (x_int, x_ext):
if theta is not None:
                # clone: theta_inv aliased theta here, so the in-place sign flip
                # mutated the caller's tensor and undid itself on the second loop pass
                theta_inv = theta.clone()
                theta_inv[:, :, 2] = -theta[:, :, 2].detach()
grid = F.affine_grid(theta_inv, x.size())
x = F.grid_sample(x, grid)
x = x.view(-1, self.n_color, self.n_theta, self.n_phase, self.
n_levels, self.n_eccentricity // 2, self.n_azimuth)
x = x.permute(0, 4, 1, 5, 6, 2, 3).contiguous()
x_list.append(x)
x = torch.cat(x_list, 3)
return x
def get_inputs():
return [torch.rand([4, 64, 16, 4])]
def get_init_inputs():
return [[], {'n_levels': 4, 'n_color': 4, 'n_eccentricity': 4,
'n_azimuth': 4, 'n_theta': 4, 'n_phase': 4}]
|
ImageProcessingModuleAlt | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/n6/cn64ke3s3pdkg3wdymzcubb5tggbca3esoui642q6wyevps6hm3w.py
# Topologically Sorted Source Nodes: [conv2d, observation], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# observation => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 107648
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3364) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mi/cmivq5mi236pi4amolhbvctmfwzoqynunqusvlnqjlvnfc6feeuh.py
# Topologically Sorted Source Nodes: [conv2d_1, observation_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# observation_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 2704) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3l/c3l3gvjt2sl5bqkjmrflwl257ejqofzmyydtujr5h5y34mj3s6ld.py
# Topologically Sorted Source Nodes: [conv2d_2, observation_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# observation_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16928
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 2116) % 2
x0 = xindex % 2116
x3 = (xindex // 2116)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (2176*x3)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (8, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 8, 7, 7), (392, 49, 7, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (2, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 58, 58), (26912, 3364, 58, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, observation], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 107648, grid=grid(107648), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 52, 52), (10816, 2704, 52, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, observation_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 43264, grid=grid(43264), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 2, 46, 46), (4232, 2116, 46, 1))
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 2, 46, 46), (4352, 2176, 46, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, observation_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_2.run(buf5, primals_7, buf6, 16928, grid=grid(16928), stream=stream0)
del primals_7
return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 7, 7), (392, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ImageProcessingModuleAlt(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=n_filters * 2,
kernel_size=7)
self.conv2 = nn.Conv2d(in_channels=n_filters * 2, out_channels=
n_filters, kernel_size=7)
self.conv3 = nn.Conv2d(in_channels=n_filters, out_channels=
n_filters // 2, kernel_size=7)
def forward(self, observation):
observation = F.relu(self.conv1(observation))
observation = F.relu(self.conv2(observation))
observation = F.relu(self.conv3(observation))
return observation
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'n_filters': 4}]
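# --- Editor-added arithmetic sketch ---
# Each conv is 7x7 with stride 1 and no padding, so the spatial size shrinks by
# kernel_size - 1 = 6 per layer: 64 -> 58 -> 52 -> 46, matching the shape
# asserts in the compiled call() elsewhere in this record.
def _valid_conv_out(size, kernel_size=7, stride=1, padding=0):
    return (size + 2 * padding - kernel_size) // stride + 1
_sizes = [64]
for _ in range(3):
    _sizes.append(_valid_conv_out(_sizes[-1]))
assert _sizes == [64, 58, 52, 46], _sizes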
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 107648
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3364 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 2704 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16928
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 2116 % 2
x0 = xindex % 2116
x3 = xindex // 2116
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 2176 * x3), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (8, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 8, 7, 7), (392, 49, 7, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 58, 58), (26912, 3364, 58, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(107648)](buf1, primals_2,
107648, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 52, 52), (10816, 2704, 52, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(43264)](buf3, primals_5,
43264, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 2, 46, 46), (4232, 2116, 46, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 2, 46, 46), (4352, 2176, 46, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(16928)](
buf5, primals_7, buf6, 16928, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6
class ImageProcessingModuleAltNew(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=n_filters * 2,
kernel_size=7)
self.conv2 = nn.Conv2d(in_channels=n_filters * 2, out_channels=
n_filters, kernel_size=7)
self.conv3 = nn.Conv2d(in_channels=n_filters, out_channels=
n_filters // 2, kernel_size=7)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
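# --- Editor-added, hedged note (assumes a CUDA device) ---
# call() returns the activation plus tensors Inductor saves for backward
# (weights, inputs, intermediate activations, and the boolean <= 0 mask buf6
# used by threshold_backward); the module wrapper surfaces only output[0].
if torch.cuda.is_available():
    m = ImageProcessingModuleAltNew(n_filters=4).cuda()
    out = m(torch.rand(4, 3, 64, 64, device='cuda'))
    assert out.shape == (4, 2, 46, 46), out.shape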
| bentrevett/task-oriented-language-grounding | ImageProcessingModuleAlt | false | 6,334 | [
"MIT"
] | 1 | 812a7bc21ee622030eb0594c576c7d60dc630148 | https://github.com/bentrevett/task-oriented-language-grounding/tree/812a7bc21ee622030eb0594c576c7d60dc630148 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=n_filters * 2,
kernel_size=7)
self.conv2 = nn.Conv2d(in_channels=n_filters * 2, out_channels=
n_filters, kernel_size=7)
self.conv3 = nn.Conv2d(in_channels=n_filters, out_channels=
n_filters // 2, kernel_size=7)
def forward(self, observation):
observation = F.relu(self.conv1(observation))
observation = F.relu(self.conv2(observation))
observation = F.relu(self.conv3(observation))
return observation
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [4]
|
MultimodalFusionModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3c/c3ca5jcqzmfmchablhanpws7uzgk37phzbvn5f25c6d4hs2gep5u.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %primals_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = (xindex // 16)
x4 = xindex % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x4), None, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x5), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf0, primals_1, buf1, 4096, grid=grid(4096), stream=stream0)
return (buf1, primals_1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MultimodalFusionModule(nn.Module):
def __init__(self, emb_dim, n_filters):
super().__init__()
self.fc_h = nn.Linear(emb_dim, n_filters)
def forward(self, image, instruction):
_batch_size, _n_filters, _height, _width = image.shape
a = torch.sigmoid(self.fc_h(instruction))
m = a.unsqueeze(-1).unsqueeze(-1)
out = (m * image).contiguous()
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4, 'n_filters': 4}]
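# --- Editor-added, hedged usage sketch ---
# With a 2-D instruction embedding of shape (batch, emb_dim), the gate
# sigmoid(fc_h(instruction)) is (batch, n_filters) and the two unsqueezes make
# it (batch, n_filters, 1, 1), broadcasting over the image's H and W. (The 4-D
# tensors from get_inputs above instead broadcast to a 6-D result, matching
# the compiled graph.)
if __name__ == '__main__':
    fusion = MultimodalFusionModule(emb_dim=4, n_filters=4)
    image = torch.rand(4, 4, 8, 8)
    instruction = torch.rand(4, 4)
    assert fusion(image, instruction).shape == image.shape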
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 16
x4 = xindex % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x4, None, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x5, tmp3, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(4096)](buf0, primals_1, buf1, 4096,
XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), buf0
class MultimodalFusionModuleNew(nn.Module):
def __init__(self, emb_dim, n_filters):
super().__init__()
self.fc_h = nn.Linear(emb_dim, n_filters)
def forward(self, input_0, input_1):
primals_2 = self.fc_h.weight
primals_3 = self.fc_h.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
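# --- Editor-added, hedged sanity sketch (assumes a CUDA device) ---
# The fused mul kernel broadcasts sigmoid(fc_h(input_1)) over input_0 and
# writes the 6-D (4, 4, 4, 4, 4, 4) buffer allocated in call() above.
if torch.cuda.is_available():
    m = MultimodalFusionModuleNew(emb_dim=4, n_filters=4).cuda()
    out = m(torch.rand(4, 4, 4, 4, device='cuda'),
            torch.rand(4, 4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4, 4, 4, 4), out.shape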
| bentrevett/task-oriented-language-grounding | MultimodalFusionModule | false | 6,335 | [
"MIT"
] | 1 | 812a7bc21ee622030eb0594c576c7d60dc630148 | https://github.com/bentrevett/task-oriented-language-grounding/tree/812a7bc21ee622030eb0594c576c7d60dc630148 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, emb_dim, n_filters):
super().__init__()
self.fc_h = nn.Linear(emb_dim, n_filters)
def forward(self, image, instruction):
_batch_size, _n_filters, _height, _width = image.shape
a = torch.sigmoid(self.fc_h(instruction))
m = a.unsqueeze(-1).unsqueeze(-1)
out = (m * image).contiguous()
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LuongAttentionDot | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2e/c2eferrbqxypo5fk7ib5eili43xk3jdta3hem5btqxd7iihf2qxz.py
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# score => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c5/cc5niyk56fa3m2oc45eqgnf3eeposiu56eefgo6q2ozqlq3nwxnc.py
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# score => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/b5/cb57nun6jjun2cwkszxwtvk2d267nw6cyauy55j5lgvkhlimse4p.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = (xindex // 256)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ec/cecrci7mbsdtf4e7ozsv4mojsyebns6z2s3ptww4ywjkuqbkkyis.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = (xindex // 256)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7j/c7jivixx7gjqujdkrqy3kukiw2roxuk3jcj7f6ia7224neqpwpkf.py
# Topologically Sorted Source Nodes: [context_vector, context_vector_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# context_vector => mul
# context_vector_1 => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_4 = async_compile.triton('triton_poi_fused_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg1_1, buf0, 1024, grid=grid(1024), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(arg0_1, buf1, 1024, grid=grid(1024), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), out=buf2)
del buf0
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 1024, grid=grid(1024), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf3, buf4, 1024, grid=grid(1024), stream=stream0)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_vector, context_vector_1], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_4.run(buf4, arg1_1, buf5, 256, grid=grid(256), stream=stream0)
del arg1_1
return (buf5, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LuongAttentionDot(nn.Module):
def __init__(self):
super().__init__()
def forward(self, query, values):
        query = torch.squeeze(query, 0)  # drop a leading length-1 step dim if present
        query = torch.unsqueeze(query, 1)  # add a broadcast dim over source positions
        query_transposed = query.transpose(2, 1)
        score = torch.matmul(values, query_transposed)  # dot-product alignment scores
attention_weights = F.softmax(score, dim=1)
context_vector = attention_weights * values
context_vector = context_vector.sum(1)
return context_vector, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
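# --- Editor-added, hedged sanity sketch ---
# The attention weights are a softmax over dim=1, so they sum to one along
# that axis; the context vector is the weighted sum of values over dim 1.
if __name__ == '__main__':
    attn = LuongAttentionDot()
    query, values = get_inputs()
    context, weights = attn(query, values)
    torch.testing.assert_close(weights.sum(1), torch.ones_like(weights.sum(1)))
    assert context.shape == (4, 4, 4, 4), context.shape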
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = xindex // 256
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = xindex // 256
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](arg1_1, buf0, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(1024)](arg0_1, buf1, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), out=buf2)
del buf0
buf3 = buf1
del buf1
triton_poi_fused__softmax_2[grid(1024)](buf2, buf3, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf2
triton_poi_fused__softmax_3[grid(1024)](buf3, buf4, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_4[grid(256)](buf4, arg1_1, buf5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
return buf5, buf4
class LuongAttentionDotNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
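# --- Editor-added, hedged note (assumes a CUDA device) ---
# Unlike the other compiled wrappers in these records, this one surfaces both
# call() outputs: the context vector and the softmax attention weights.
if torch.cuda.is_available():
    q = torch.rand(4, 4, 4, 4, device='cuda')
    v = torch.rand(4, 4, 4, 4, device='cuda')
    ctx, w = LuongAttentionDotNew()(q, v)
    assert ctx.shape == (4, 4, 4, 4) and w.shape == (4, 4, 4, 4, 4)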
| beroguedou/nmt-pytorch | LuongAttentionDot | false | 6,336 | [
"MIT"
] | 1 | 8758ba33e2d5f4eca7f1ac2d04582678332bbcd5 | https://github.com/beroguedou/nmt-pytorch/tree/8758ba33e2d5f4eca7f1ac2d04582678332bbcd5 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, query, values):
query = torch.squeeze(query, 0)
query = torch.unsqueeze(query, 1)
query_transposed = query.transpose(2, 1)
score = torch.matmul(values, query_transposed)
attention_weights = F.softmax(score, dim=1)
context_vector = attention_weights * values
context_vector = context_vector.sum(1)
return context_vector, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cc/ccc3rfqzulhvn4nmibqhwjsomjzanxwwc34nr7d7enltyb7czw67.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 500
x2 = (xindex // 2000)
x3 = xindex % 2000
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (2016*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (2048*x2)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/od/cod4chcodxhfkpwgvalpo3wjcotr3qr3momwpd2myrvazuq2fy67.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# x => relu
# x_1 => view_2
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %view_2 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu, [64, 500]), kwargs = {})
triton_poi_fused_relu_view_1 = async_compile.triton('triton_poi_fused_relu_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 500
x1 = (xindex // 500)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (500*(x1 % 4)) + (2016*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (500, 4), (4, 1))
assert_size_stride(primals_2, (500, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (20, 500), (500, 1))
assert_size_stride(primals_5, (20, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 500), (500, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 500), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 500), (8064, 2016, 500, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 500), (8192, 2048, 500, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf0, primals_2, buf1, buf4, 32000, grid=grid(32000), stream=stream0)
del primals_2
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_1.run(buf1, buf2, 32000, grid=grid(32000), stream=stream0)
del buf1
buf3 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (500, 20), (1, 500), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 20), (320, 80, 20, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((500, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 500), (500, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, feature_num):
super(Net, self).__init__()
self.layer_1 = nn.Linear(feature_num, 500)
self.layer_2 = nn.Linear(500, 20)
def forward(self, x):
x = F.relu(self.layer_1(x))
x = self.layer_2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feature_num': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 500
x2 = xindex // 2000
x3 = xindex % 2000
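    # The two output buffers use padded row strides (2016 for the ReLU output,
    # 2048 for the boolean backward mask), so the flat index is split into
    # (x2, x3) before each store below.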
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 2016 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 2048 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 500
x1 = xindex // 500
x2 = xindex
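    # Gather from the padded (stride-2016) activation buffer back into a
    # contiguous (64, 500) layout so the following addmm can consume it.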
tmp0 = tl.load(in_ptr0 + (x0 + 500 * (x1 % 4) + 2016 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (500, 4), (4, 1))
assert_size_stride(primals_2, (500,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (20, 500), (500, 1))
assert_size_stride(primals_5, (20,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 500), (500, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 500), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 500), (8064, 2016, 500, 1),
torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 500), (8192, 2048, 500, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(32000)](buf0,
primals_2, buf1, buf4, 32000, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_2
buf2 = buf0
del buf0
        triton_poi_fused_relu_view_1[grid(32000)](buf1, buf2, 32000, XBLOCK=256,
            num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(500, 20), (1, 500), 0), alpha=1, beta=1, out=buf3)
del primals_5
    return (reinterpret_tensor(buf3, (4, 4, 4, 20), (320, 80, 20, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, primals_4, buf4)
class NetNew(nn.Module):
def __init__(self, feature_num):
super(NetNew, self).__init__()
self.layer_1 = nn.Linear(feature_num, 500)
self.layer_2 = nn.Linear(500, 20)
def forward(self, input_0):
primals_1 = self.layer_1.weight
primals_2 = self.layer_1.bias
primals_4 = self.layer_2.weight
primals_5 = self.layer_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| bm2-lab/scPrivacy | Net | false | 6,337 | [
"MIT"
] | 1 | 444c8f3a5e7b890c299cd823359e5414f73d6205 | https://github.com/bm2-lab/scPrivacy/tree/444c8f3a5e7b890c299cd823359e5414f73d6205 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, feature_num):
super().__init__()
self.layer_1 = nn.Linear(feature_num, 500)
self.layer_2 = nn.Linear(500, 20)
def forward(self, x):
x = F.relu(self.layer_1(x))
x = self.layer_2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
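# Minimal smoke test, assuming the Model/get_inputs/get_init_inputs helpers
# above; feature_num=4, so a (4, 4, 4, 4) input maps to (4, 4, 4, 20).
if __name__ == '__main__':
    model = Model(*get_init_inputs())
    out = model(*get_inputs())
    print(out.shape)  # torch.Size([4, 4, 4, 20])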
|
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/do/cdo22no4lmipk7byduyah2xsadvdcbfr22puoptl5br3l66r6jra.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# hidden => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 256, grid=grid(256), stream=stream0)
del primals_3
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super(MLP, self).__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'n_classes': 4, 'hidden_dim': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
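    # Fused bias add + leaky_relu (negative_slope=0.01): keep x where x > 0,
    # otherwise 0.01 * x; the boolean mask tmp4 is saved for the backward pass.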
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_3, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = buf0
del buf0
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1,
            out=buf3)
del primals_5
    return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1,
        reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4)
class MLPNew(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super(MLPNew, self).__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input_0):
primals_2 = self.l_in.weight
primals_3 = self.l_in.bias
primals_4 = self.l_out.weight
primals_5 = self.l_out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| bigdata-ustc/DisenQNet | MLP | false | 6,338 | [
"MIT"
] | 1 | 908fadeb9b8d278450213deff70205703bd91da6 | https://github.com/bigdata-ustc/DisenQNet/tree/908fadeb9b8d278450213deff70205703bd91da6 | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super().__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 0.5]
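# Minimal smoke test, assuming the helpers above. eval() disables the
# Dropout layers so the forward pass is deterministic.
if __name__ == '__main__':
    model = Model(*get_init_inputs()).eval()
    out = model(*get_inputs())
    print(out.shape)  # torch.Size([4, 4, 4, 4])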
|
PairwiseBCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vf/cvfjgkoel2zhys242m4bi5mjv4jqnvyo2wiry7glxexa3ccphlm3.py
# Topologically Sorted Source Nodes: [bce_loss, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean]
# Source node to ATen node mapping:
# bce_loss => abs_1, exp, full_default, log1p, minimum, mul, neg, sub_1, sub_2, sub_3
# loss => mean
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_mean_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [bce_loss, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from abc import abstractmethod
import torch.utils.data.dataloader
import torch.nn.functional as F
import torch.nn as nn
import torch.nn
import torch.optim.optimizer
class SimilarityLoss(nn.Module):
def __init__(self):
super(SimilarityLoss, self).__init__()
@abstractmethod
def forward(self, inputs, targets):
pass
class PairwiseBCELoss(SimilarityLoss):
"""
Binary cross entropy between pair similarities and pair labels.
"""
def __init__(self, balanced=False):
super(PairwiseBCELoss, self).__init__()
self.balanced = balanced
def forward(self, inputs, targets):
n = inputs.shape[0]
neg_targets = torch.ones_like(targets) - targets
bce_loss = F.binary_cross_entropy_with_logits(inputs, targets,
reduction='none')
if self.balanced:
weight_matrix = n * (targets / 2.0 + neg_targets / (2.0 * (n - 1)))
bce_loss *= weight_matrix
loss = bce_loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from abc import abstractmethod
import torch.utils.data.dataloader
import torch.nn as nn
import torch.nn
import torch.optim.optimizer
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
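    # Numerically stable binary_cross_entropy_with_logits, reduced to a mean:
    # loss = (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
    #      = max(x, 0) - t * x + log(1 + exp(-|x|))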
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SimilarityLoss(nn.Module):
def __init__(self):
super(SimilarityLoss, self).__init__()
@abstractmethod
def forward(self, inputs, targets):
pass
class PairwiseBCELossNew(SimilarityLoss):
"""
Binary cross entropy between pair similarities and pair labels.
"""
def __init__(self, balanced=False):
super(PairwiseBCELossNew, self).__init__()
self.balanced = balanced
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| bogdankostic/flair | PairwiseBCELoss | false | 6,339 | [
"MIT"
] | 1 | 8cf03eab19512e94c1bcb4a30409bb065d37fe25 | https://github.com/bogdankostic/flair/tree/8cf03eab19512e94c1bcb4a30409bb065d37fe25 | import torch
from abc import abstractmethod
import torch.utils.data.dataloader
import torch.nn.functional as F
import torch.nn as nn
import torch.nn
import torch.optim.optimizer
class SimilarityLoss(nn.Module):
def __init__(self):
super().__init__()
@abstractmethod
def forward(self, inputs, targets):
pass
class Model(SimilarityLoss):
"""
Binary cross entropy between pair similarities and pair labels.
"""
def __init__(self, balanced=False):
super().__init__()
self.balanced = balanced
def forward(self, inputs, targets):
n = inputs.shape[0]
neg_targets = torch.ones_like(targets) - targets
bce_loss = F.binary_cross_entropy_with_logits(inputs, targets,
reduction='none')
if self.balanced:
weight_matrix = n * (targets / 2.0 + neg_targets / (2.0 * (n - 1)))
bce_loss *= weight_matrix
loss = bce_loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
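# Minimal sanity check, assuming the Model helper above: with zero logits and
# targets of 0.5, every element's loss is log(2), so the mean is ~0.6931.
if __name__ == '__main__':
    loss_fn = Model()
    loss = loss_fn(torch.zeros(4, 4), torch.full((4, 4), 0.5))
    print(loss.item())  # ~0.6931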
|
FociDetector | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pr/cpr426qthpcwimov7jipi4nse6j3yc3a6tpqqcsunqmgsfmvsy77.py
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# output => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 40960
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fx/cfxej7auzckcka2helzzejfy6a7fgocetaacp6vujezdiee6insh.py
# Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# output_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 10240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/aj/caj6t42vlpavypj2nbsrmnkvvz5h34mnq7caffu4ghottwcodw5d.py
# Topologically Sorted Source Nodes: [conv2d_2, output_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# output_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2560
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wu/cwujthp6dbahkh3aoc5nqdfper2qtmcmsz4htrlpffuhd26r3jcx.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (10, ), (1, ))
assert_size_stride(primals_6, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_7, (10, ), (1, ))
assert_size_stride(primals_8, (1, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 32, 32), (10240, 1024, 32, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 40960, grid=grid(40960), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 10, 16, 16), (2560, 256, 16, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 10240, grid=grid(10240), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 8, 8), (640, 64, 8, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, output_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 2560, grid=grid(2560), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 8, 8), (64, 64, 8, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf7, primals_9, 256, grid=grid(256), stream=stream0)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class FociDetector(nn.Module):
def __init__(self, input_channels=3, input_size=17, ksize=5,
hidden_channels=10):
super(FociDetector, self).__init__()
self.conv1 = nn.Conv2d(input_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv2 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv3 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.finalmapsize = ksize
self.convf = nn.Conv2d(hidden_channels, 1, self.finalmapsize,
padding=int((ksize - 1) / 2))
self.relu = nn.ReLU()
def forward(self, x):
output = self.relu(self.conv1(x))
output = self.relu(self.conv2(output))
output = self.relu(self.conv3(output))
output = self.convf(output)
return output
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 10
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
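    # Fused epilogue of the first stride-2 convolution: add the per-channel
    # bias (x1 indexes the 10 output channels) and apply ReLU in place.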
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 10
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 2560
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (10,), (1,))
assert_size_stride(primals_6, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_7, (10,), (1,))
assert_size_stride(primals_8, (1, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 32, 32), (10240, 1024, 32, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(40960)](buf1, primals_2,
40960, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 10, 16, 16), (2560, 256, 16, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(10240)](buf3, primals_5,
10240, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 8, 8), (640, 64, 8, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(2560)](buf5, primals_7,
2560, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 8, 8), (64, 64, 8, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(256)](buf7, primals_9, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5)
class FociDetectorNew(nn.Module):
def __init__(self, input_channels=3, input_size=17, ksize=5,
hidden_channels=10):
super(FociDetectorNew, self).__init__()
self.conv1 = nn.Conv2d(input_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv2 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv3 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.finalmapsize = ksize
self.convf = nn.Conv2d(hidden_channels, 1, self.finalmapsize,
padding=int((ksize - 1) / 2))
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.convf.weight
primals_9 = self.convf.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| bharath272/centrosome-analysis | FociDetector | false | 6,340 | [
"MIT"
] | 1 | 6ae3744be464812b3767909420d7b78cea9da670 | https://github.com/bharath272/centrosome-analysis/tree/6ae3744be464812b3767909420d7b78cea9da670 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, input_channels=3, input_size=17, ksize=5,
hidden_channels=10):
super().__init__()
self.conv1 = nn.Conv2d(input_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv2 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv3 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.finalmapsize = ksize
self.convf = nn.Conv2d(hidden_channels, 1, self.finalmapsize,
padding=int((ksize - 1) / 2))
self.relu = nn.ReLU()
def forward(self, x):
output = self.relu(self.conv1(x))
output = self.relu(self.conv2(output))
output = self.relu(self.conv3(output))
output = self.convf(output)
return output
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
LuongAttentionGeneral | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gr/cgrvn4fvi7ctm4qrwi4qjxhojcbd5ubnyfxs7y56y7kyxke25hmo.py
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# score => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c5/cc5niyk56fa3m2oc45eqgnf3eeposiu56eefgo6q2ozqlq3nwxnc.py
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# score => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/b5/cb57nun6jjun2cwkszxwtvk2d267nw6cyauy55j5lgvkhlimse4p.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_4, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = (xindex // 256)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ec/cecrci7mbsdtf4e7ozsv4mojsyebns6z2s3ptww4ywjkuqbkkyis.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = (xindex // 256)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7j/c7jivixx7gjqujdkrqy3kukiw2roxuk3jcj7f6ia7224neqpwpkf.py
# Topologically Sorted Source Nodes: [context_vector, context_vector_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# context_vector => mul
# context_vector_1 => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_4), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_4 = async_compile.triton('triton_poi_fused_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_1, buf2, 1024, grid=grid(1024), stream=stream0)
del primals_1
buf3 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 1024, grid=grid(1024), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 1024, grid=grid(1024), stream=stream0)
del buf4
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [context_vector, context_vector_1], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_4.run(buf5, primals_4, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, buf5, primals_4, buf5, reinterpret_tensor(buf2, (64, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LuongAttentionGeneral(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.W = nn.Linear(hidden_size, hidden_size)
def forward(self, query, values):
query = torch.squeeze(query, 0)
query = torch.unsqueeze(query, 1)
query_transposed = query.transpose(2, 1)
score = torch.matmul(self.W(values), query_transposed)
attention_weights = F.softmax(score, dim=1)
context_vector = attention_weights * values
context_vector = context_vector.sum(1)
return context_vector, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
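# Adds the linear bias to the W(values) matmul result while materializing the
# broadcast-expanded clone that feeds the batched matmul below.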
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
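# Copies the query into a contiguous, tile-expanded layout for the batched
# matmul that produces the attention scores.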
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
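# Softmax over dim 1, step 1: take the max of the four strided slices,
# subtract it for numerical stability, and exponentiate.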
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = xindex // 256
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
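# Softmax over dim 1, step 2: divide each exponentiated value by the sum of
# the four slices.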
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 64
x2 = xindex // 256
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
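# Context vector: multiply the attention weights by the values and reduce the
# four dim-1 slices in registers.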
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
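# Forward pass: W(values) via mm, bias add + expansion, bmm for the scores,
# the two-kernel softmax, then the fused multiply-sum for the context vector.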
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](buf0, primals_3, buf1, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_1[grid(1024)](primals_1, buf2, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_1
buf3 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = buf1
del buf1
triton_poi_fused__softmax_2[grid(1024)](buf3, buf4, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf3
triton_poi_fused__softmax_3[grid(1024)](buf4, buf5, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del buf4
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_mul_sum_4[grid(256)](buf5, primals_4, buf6, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf6, buf5, primals_4, buf5, reinterpret_tensor(buf2, (64, 4, 4),
(16, 1, 4), 0)
class LuongAttentionGeneralNew(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.W = nn.Linear(hidden_size, hidden_size)
def forward(self, input_0, input_1):
primals_2 = self.W.weight
primals_3 = self.W.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
| beroguedou/nmt-pytorch | LuongAttentionGeneral | false | 6,341 | [
"MIT"
] | 1 | 8758ba33e2d5f4eca7f1ac2d04582678332bbcd5 | https://github.com/beroguedou/nmt-pytorch/tree/8758ba33e2d5f4eca7f1ac2d04582678332bbcd5 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.W = nn.Linear(hidden_size, hidden_size)
def forward(self, query, values):
query = torch.squeeze(query, 0)
query = torch.unsqueeze(query, 1)
query_transposed = query.transpose(2, 1)
score = torch.matmul(self.W(values), query_transposed)
attention_weights = F.softmax(score, dim=1)
context_vector = attention_weights * values
context_vector = context_vector.sum(1)
return context_vector, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), buf3, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_unit(layer):
inp = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(inp)
return -lim, lim
class Actor(nn.Module):
def __init__(self, state_size, action_size, seed=2, fc_units=256):
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc_units)
self.fc2 = nn.Linear(fc_units, action_size)
self.reset_weights()
def reset_weights(self):
self.fc1.weight.data.uniform_(*hidden_unit(self.fc1))
self.fc2.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
x = F.relu(self.fc1(state))
return torch.tanh(self.fc2(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
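# Fused bias add + ReLU for fc1; also stores the (output <= 0) boolean mask
# that the autograd threshold_backward pass consumes.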
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
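# Fused bias add + tanh for the fc2 output, applied in place.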
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
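# Forward pass: fc1 as mm + fused ReLU (recording the backward mask), then
# fc2 as mm + fused tanh.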
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf4, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_1[grid(256)](buf3, primals_5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), buf3, primals_4, buf4
def hidden_unit(layer):
inp = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(inp)
return -lim, lim
class ActorNew(nn.Module):
def __init__(self, state_size, action_size, seed=2, fc_units=256):
super(ActorNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc_units)
self.fc2 = nn.Linear(fc_units, action_size)
self.reset_weights()
def reset_weights(self):
self.fc1.weight.data.uniform_(*hidden_unit(self.fc1))
self.fc2.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| bnriiitb/Deep-Reinforcement-Learning | Actor | false | 6,342 | [
"MIT"
] | 1 | 5649a9d86fbec32fe3ac9cbb923d0d3a4c692d1e | https://github.com/bnriiitb/Deep-Reinforcement-Learning/tree/5649a9d86fbec32fe3ac9cbb923d0d3a4c692d1e | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_unit(layer):
inp = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(inp)
return -lim, lim
class Model(nn.Module):
def __init__(self, state_size, action_size, seed=2, fc_units=256):
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc_units)
self.fc2 = nn.Linear(fc_units, action_size)
self.reset_weights()
def reset_weights(self):
self.fc1.weight.data.uniform_(*hidden_unit(self.fc1))
self.fc2.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
x = F.relu(self.fc1(state))
return torch.tanh(self.fc2(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PositionwiseFeedforwardLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/23/c23p7mzbu3tuw3h544yc6vkom55w3r24x3tq7agdzk7kmijrfdy3.py
# Topologically Sorted Source Nodes: [relu, x], Original ATen: [aten.relu, aten.pow]
# Source node to ATen node mapping:
# relu => relu
# x => pow_1
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {})
triton_poi_fused_pow_relu_0 = async_compile.triton('triton_poi_fused_pow_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tmp2 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu, x], Original ATen: [aten.relu, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_relu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionwiseFeedforwardLayer(nn.Module):
def __init__(self, hid_dim: 'int', pf_dim: 'int', dropout: 'float') ->None:
super().__init__()
self.fc_1 = nn.Linear(hid_dim, pf_dim)
self.fc_2 = nn.Linear(pf_dim, hid_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x: 'torch.FloatTensor') ->torch.FloatTensor:
x = F.relu(self.fc_1(x)) ** 2
x = self.fc_2(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hid_dim': 4, 'pf_dim': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
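# Squared-ReLU activation: computes max(x, 0) ** 2 elementwise.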
@triton.jit
def triton_poi_fused_pow_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tmp2 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
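# Forward pass: fc_1 via addmm (bias fused in), the squared-ReLU kernel
# above, then fc_2 via addmm; note the traced graph contains no dropout op.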
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_relu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class PositionwiseFeedforwardLayerNew(nn.Module):
def __init__(self, hid_dim: 'int', pf_dim: 'int', dropout: 'float') ->None:
super().__init__()
self.fc_1 = nn.Linear(hid_dim, pf_dim)
self.fc_2 = nn.Linear(pf_dim, hid_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0):
primals_1 = self.fc_1.weight
primals_2 = self.fc_1.bias
primals_4 = self.fc_2.weight
primals_5 = self.fc_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| bob80333/investigating_extrapolation | PositionwiseFeedforwardLayer | false | 6,343 | [
"MIT"
] | 1 | fc4f72baa46b8490968f7ad546897937feb8b25d | https://github.com/bob80333/investigating_extrapolation/tree/fc4f72baa46b8490968f7ad546897937feb8b25d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, hid_dim: 'int', pf_dim: 'int', dropout: 'float') ->None:
super().__init__()
self.fc_1 = nn.Linear(hid_dim, pf_dim)
self.fc_2 = nn.Linear(pf_dim, hid_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x: 'torch.FloatTensor') ->torch.FloatTensor:
x = F.relu(self.fc_1(x)) ** 2
x = self.fc_2(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5]
|
KopoinANNNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gz/cgzsvvv437wotw6wxl6ccd3pe3mteyaie3w6fyzbscxu4xitsb57.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_1 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 2), (2, 1))
assert_size_stride(primals_5, (2, ), (1, ))
assert_size_stride(primals_6, (2, 2), (2, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 128, grid=grid(128), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 2), (1, 2), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_0.run(buf3, primals_5, 128, grid=grid(128), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 2), (2, 1), 0), reinterpret_tensor(primals_6, (2, 2), (1, 2), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, )
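# The trailing return values are the tensors saved for the backward graph: the
# flattened input view, both post-sigmoid activations (buf1, buf3), and the
# weights of the last two linear layers (primals_6, primals_4).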
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class KopoinANNNetwork(nn.Module):
def __init__(self, featShape):
super(KopoinANNNetwork, self).__init__()
self.featShape = featShape
self.act = nn.Sigmoid()
self.layer0 = nn.Linear(featShape, featShape // 2)
self.layer1 = nn.Linear(featShape // 2, featShape // 2)
self.layer2 = nn.Linear(featShape // 2, 2)
def forward(self, x):
x = self.layer0(x)
x = self.act(x)
x = self.layer1(x)
x = self.act(x)
x = self.layer2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'featShape': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
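# The kernel above is an in-place fused bias + sigmoid over 128 elements: the
# bias is broadcast along the last dimension (xindex % 2 selects one of the two
# output features) before tl.sigmoid is applied and the result written back.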
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 2), (2, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (2, 2), (2, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf0
get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(128)](buf1, primals_2, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2), (2, 1), 0),
reinterpret_tensor(primals_4, (2, 2), (1, 2), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf2
        triton_poi_fused_sigmoid_0[grid(128)](buf3, primals_5, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 2),
            (2, 1), 0), reinterpret_tensor(primals_6, (2, 2), (1, 2), 0),
            alpha=1, beta=1, out=buf4)
del primals_7
    return (reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        buf1, buf3, primals_6, primals_4)
class KopoinANNNetworkNew(nn.Module):
def __init__(self, featShape):
super(KopoinANNNetworkNew, self).__init__()
self.featShape = featShape
self.act = nn.Sigmoid()
self.layer0 = nn.Linear(featShape, featShape // 2)
self.layer1 = nn.Linear(featShape // 2, featShape // 2)
self.layer2 = nn.Linear(featShape // 2, 2)
def forward(self, input_0):
primals_1 = self.layer0.weight
primals_2 = self.layer0.bias
primals_4 = self.layer1.weight
primals_5 = self.layer1.bias
primals_6 = self.layer2.weight
primals_7 = self.layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| bmd2007/benchmark_eval | KopoinANNNetwork | false | 6,344 | ["MIT"] | 1 | aa42bb3369e79db4cb63e1963afcc8af6d8f5696 | https://github.com/bmd2007/benchmark_eval/tree/aa42bb3369e79db4cb63e1963afcc8af6d8f5696 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, featShape):
super().__init__()
self.featShape = featShape
self.act = nn.Sigmoid()
self.layer0 = nn.Linear(featShape, featShape // 2)
self.layer1 = nn.Linear(featShape // 2, featShape // 2)
self.layer2 = nn.Linear(featShape // 2, 2)
def forward(self, x):
x = self.layer0(x)
x = self.act(x)
x = self.layer1(x)
x = self.act(x)
x = self.layer2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
BertPooler | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# pooled_output => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format})
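# %select here is hidden_states[:, 0] (the first-token slice of the (4, 4, 4, 4)
# input); cloning it into contiguous memory lets the subsequent mm consume it
# as a plain (16, 4) matrix.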
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mg/cmgugfl5ayyiszoktgyz6u2hcenlsq7ic6lreafn7w7gkeqefa54.py
# Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.gelu]
# Source node to ATen node mapping:
# pooled_output => add
# pooled_output_1 => add_1, erf, mul, mul_1, mul_2
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
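# Written out, the fragment above is the exact (erf-based) GELU applied to the
# biased matmul output: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), where the
# constant 0.7071067811865476 is 1/sqrt(2). This matches nn.GELU with its
# default approximate='none' behaviour.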
triton_poi_fused_add_gelu_1 = async_compile.triton('triton_poi_fused_add_gelu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gelu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_gelu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.gelu]
triton_poi_fused_add_gelu_1.run(buf1, primals_3, buf2, 64, grid=grid(64), stream=stream0)
return (buf2, primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional
from torch import nn
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.GELU()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
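# Note: this pooler follows BERT's convention of pooling via the first token's
# ([CLS]-position) hidden state, but applies GELU where the reference BERT
# pooler uses Tanh.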
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_gelu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
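# Eager sketch of the kernel above: out = F.gelu(x + bias) with the exact erf
# formulation, the bias being broadcast over the last dimension of size 4.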
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_gelu_1[grid(64)](buf1, primals_3, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf2, primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1
class BertPoolerNew(nn.Module):
def __init__(self, config):
super(BertPoolerNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.GELU()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| bj1103/FaST-VGS-Family | BertPooler | false | 6,345 | ["BSD-3-Clause"] | 1 | 824f987a5bd647fc17aa34b98eb1d9109441d64b | https://github.com/bj1103/FaST-VGS-Family/tree/824f987a5bd647fc17aa34b98eb1d9109441d64b | from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional
from torch import nn
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.GELU()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
PatchMerge | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cd/ccdx2fddfeepacucdj3letxeueyreism77yexvyvwrzrkvq6mbcn.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
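# The convolution itself goes through the extern (library) path in call() with
# bias=None; only the bias add is emitted as the Triton kernel below. With
# kernel_size == stride == 2, every non-overlapping 2x2 patch is merged into a
# single output location carrying window_size * channels = 8 features.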
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 128, grid=grid(128), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class PatchMerge(nn.Module):
"""
Implements the Patch Merge operator from Swin Transformer
"""
def __init__(self, channels: 'int', window_size: 'int'=2):
super(PatchMerge, self).__init__()
self.merger = nn.Conv2d(in_channels=channels, out_channels=
window_size * channels, kernel_size=window_size, stride=
window_size, padding=(window_size - 1) // 2)
def forward(self, inputs: 'torch.Tensor') ->torch.Tensor:
return self.merger(inputs)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(128)](buf1, primals_2, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class PatchMergeNew(nn.Module):
"""
Implements the Patch Merge operator from Swin Transformer
"""
def __init__(self, channels: 'int', window_size: 'int'=2):
super(PatchMergeNew, self).__init__()
self.merger = nn.Conv2d(in_channels=channels, out_channels=
window_size * channels, kernel_size=window_size, stride=
window_size, padding=(window_size - 1) // 2)
def forward(self, input_0):
primals_1 = self.merger.weight
primals_2 = self.merger.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| bradezard131/swin-transformer | PatchMerge | false | 6,346 | ["MIT"] | 1 | 72e38cbae8bda332d03dced814d10b45185c04de | https://github.com/bradezard131/swin-transformer/tree/72e38cbae8bda332d03dced814d10b45185c04de | import torch
from torch import nn
class Model(nn.Module):
"""
Implements the Patch Merge operator from Swin Transformer
"""
def __init__(self, channels: 'int', window_size: 'int'=2):
super().__init__()
self.merger = nn.Conv2d(in_channels=channels, out_channels=
window_size * channels, kernel_size=window_size, stride=
window_size, padding=(window_size - 1) // 2)
def forward(self, inputs: 'torch.Tensor') ->torch.Tensor:
return self.merger(inputs)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
PatchEmbed | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/kn/cknyjwkwufnzzf4ya3scui55ownkmt5cdh3hggzwsfe3ch5fshzm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
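# The kernel above repacks the NCHW input (strides (12288, 4096, 64, 1)) into a
# channels-last layout (strides (12288, 1, 192, 3)); triton_poi_fused_1 below
# does the same for the 768x3x16x16 weight, so the stride-16 convolution runs
# on channel-contiguous data.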
# kernel path: runs/run_shard_4/inductor_cache/qy/cqyu5l2p6xh633a7thd2tte3bszrg4ugscf2y523iookhmpheqal.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 256], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2304
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (768*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4c/c4ckui43udehobca2kb3vy5stpaqfztmtjwrdinx2dhmcmh73fmo.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [16, 16], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3072
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = (yindex // 768)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (768*x2) + (12288*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask)
''', device_str='cuda')
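# triton_poi_fused_convolution_2 adds the bias and writes the result back in
# contiguous NCHW order, so the final flatten(2).transpose(1, 2) is served by a
# zero-copy reinterpret_tensor to (4, 16, 768) in call().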
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (768, 3, 16, 16), (768, 256, 16, 1))
assert_size_stride(primals_3, (768, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 2304, 256, grid=grid(2304, 256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, buf1, stride=(16, 16), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768))
buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf2, primals_3, buf3, 3072, 16, grid=grid(3072, 16), stream=stream0)
del buf2
del primals_3
return (reinterpret_tensor(buf3, (4, 16, 768), (12288, 1, 16), 0), buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((768, 3, 16, 16), (768, 256, 16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = img_size // patch_size * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, x):
_B, _C, _H, _W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
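# Shape flow: (B, C, H, W) -> proj -> (B, embed_dim, H/ps, W/ps) -> flatten(2)
# -> (B, embed_dim, num_patches) -> transpose(1, 2) -> (B, num_patches,
# embed_dim); for the sample inputs, (4, 3, 64, 64) -> (4, 16, 768).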
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 2304
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 768 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = yindex // 768
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 12288 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (768, 3, 16, 16), (768, 256, 16, 1))
assert_size_stride(primals_3, (768,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
        buf1 = empty_strided_cuda((768, 3, 16, 16), (768, 1, 48, 3),
            torch.float32)
triton_poi_fused_1[grid(2304, 256)](primals_2, buf1, 2304, 256,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf0, buf1, stride=(16, 16),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 4, 4), (12288, 1, 3072, 768))
        buf3 = empty_strided_cuda((4, 768, 4, 4), (12288, 16, 4, 1),
            torch.float32)
triton_poi_fused_convolution_2[grid(3072, 16)](buf2, primals_3,
buf3, 3072, 16, XBLOCK=16, YBLOCK=32, num_warps=4, num_stages=1)
del buf2
del primals_3
    return reinterpret_tensor(buf3, (4, 16, 768), (12288, 1, 16), 0), buf0, buf1
class PatchEmbedNew(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = img_size // patch_size * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, input_0):
primals_2 = self.proj.weight
primals_3 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| bmi-imaginelab/CD-Net-Histopathology-Representation-Learning-using-Pyramidal-Context-Detail-Network | PatchEmbed | false | 6,347 | ["Apache-2.0"] | 1 | cc4dad85cdeea7295cb48f6f947fd1ac25d8862e | https://github.com/bmi-imaginelab/CD-Net-Histopathology-Representation-Learning-using-Pyramidal-Context-Detail-Network/tree/cc4dad85cdeea7295cb48f6f947fd1ac25d8862e | import torch
import torch.nn as nn
class Model(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = img_size // patch_size * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size,
stride=patch_size)
def forward(self, x):
_B, _C, _H, _W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
LunarLanderDQN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/d4/cd4rmmsd46lha6l4ut5a6nmc4wf52z3irm2fxiokbhgjhmlh75hj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
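# Besides applying ReLU in place, the kernel stores the (activation <= 0) mask
# into a separate bool buffer; the forward returns those masks (buf5, buf6) so
# the backward pass can zero gradients without recomputing the comparison.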
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 12), (12, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 12), (12, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 768, grid=grid(768), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 12), (12, 1), 0), reinterpret_tensor(primals_4, (12, 12), (1, 12), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 12), (192, 48, 12, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 768, grid=grid(768), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 12), (12, 1), 0), reinterpret_tensor(primals_6, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 12), (12, 1), 0), reinterpret_tensor(buf3, (64, 12), (12, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LunarLanderDQN(nn.Module):
def __init__(self, state_space_dim, action_space_dim, hidden=12):
super(LunarLanderDQN, self).__init__()
self.hidden = hidden
self.fc1 = nn.Linear(state_space_dim, hidden)
self.fc2 = nn.Linear(hidden, hidden)
self.fc3 = nn.Linear(hidden, action_space_dim)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_space_dim': 4, 'action_space_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 12), (12, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 12), (12, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(768)](buf1,
primals_2, buf6, 768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 12), (12, 1), 0),
reinterpret_tensor(primals_4, (12, 12), (1, 12), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 12), (192, 48, 12, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(768)](buf3,
primals_5, buf5, 768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 12),
(12, 1), 0), reinterpret_tensor(primals_6, (12, 4), (1, 12), 0),
alpha=1, beta=1, out=buf4)
del primals_7
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 12), (12, 1), 0),
        reinterpret_tensor(buf3, (64, 12), (12, 1), 0),
        primals_6, buf5, primals_4, buf6)
class LunarLanderDQNNew(nn.Module):
def __init__(self, state_space_dim, action_space_dim, hidden=12):
super(LunarLanderDQNNew, self).__init__()
self.hidden = hidden
self.fc1 = nn.Linear(state_space_dim, hidden)
self.fc2 = nn.Linear(hidden, hidden)
self.fc3 = nn.Linear(hidden, action_space_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| breno-aberle/rl-pong-project | LunarLanderDQN | false | 6,348 | ["MIT"] | 1 | 9dc0d12e4bbcdb2905d46f66e84fac6d70c7831d | https://github.com/breno-aberle/rl-pong-project/tree/9dc0d12e4bbcdb2905d46f66e84fac6d70c7831d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_space_dim, action_space_dim, hidden=12):
super().__init__()
self.hidden = hidden
self.fc1 = nn.Linear(state_space_dim, hidden)
self.fc2 = nn.Linear(hidden, hidden)
self.fc3 = nn.Linear(hidden, action_space_dim)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DuelingQNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/e4/ce4x5yyigsb7innoktwrjgrzgq6jtwqrfrlvem4khtct7477ynrb.py
# Topologically Sorted Source Nodes: [adv], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# adv => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nl/cnlp53tjaaclmkilyizp32cbhjp6ctd3j4psucie664opwp5nivh.py
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# x => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %addmm_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %expand_1), kwargs = {})
triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x2), xmask)
tmp6 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp10 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = tmp5 - tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (48, 4), (4, 1))
assert_size_stride(primals_3, (48, ), (1, ))
assert_size_stride(primals_4, (48, 4), (4, 1))
assert_size_stride(primals_5, (48, ), (1, ))
assert_size_stride(primals_6, (4, 48), (48, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, 48), (48, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 48), (48, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 48), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [adv], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 192, grid=grid(192), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 48), (48, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_4, (4, 48), (1, 4), 0), out=buf2)
del primals_4
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [val], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 192, grid=grid(192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [adv_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (48, 4), (1, 48), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (48, 1), (1, 48), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.sub]
triton_poi_fused_add_sub_1.run(buf5, primals_9, buf4, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del buf5
del primals_9
return (buf6, primals_1, buf1, buf3, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((48, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((48, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 48), (48, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 48), (48, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class DuelingQNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
"""
super(DuelingQNetwork, self).__init__()
self.action_size = action_size
self.seed = torch.manual_seed(seed)
self.fc1_adv = nn.Linear(state_size, fc1_units)
self.fc1_val = nn.Linear(state_size, fc1_units)
self.fc2_adv = nn.Linear(fc1_units, action_size)
self.fc2_val = nn.Linear(fc1_units, 1)
def forward(self, state):
"""Build a network that maps state -> action values."""
batch_size = state.size(0)
adv = F.relu(self.fc1_adv(state))
val = F.relu(self.fc1_val(state))
adv = self.fc2_adv(adv)
x_size = batch_size
val = self.fc2_val(val).expand(x_size, self.action_size)
x = val + adv - adv.mean(1).unsqueeze(1).expand(x_size, self.
action_size)
return x
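# Written out, the dueling aggregation above is
#   Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a')
# Subtracting the mean advantage keeps V and A identifiable: e.g. with
# V(s) = 1.0 and A(s, .) = [2, 0, -1, -1] (mean 0), Q(s, .) = [3, 1, 0, 0].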
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
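# Fused bias-add + ReLU over the (4, 48) fc1_adv / fc1_val activations.
# The matmul itself runs through extern_kernels.mm in call() below; this
# kernel only adds the broadcast bias (x0 = feature index) and clamps the
# result at zero in place (xnumel = 4 * 48 = 192).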
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
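# Fused dueling combine for the (4, 4) output: out[x1, x0] =
# (val_mm[x1] + val_bias) + adv[x1, x0] - mean_j adv[x1, j], where the four
# extra loads tmp6..tmp11 recompute the row mean of adv inline.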
@triton.jit
def triton_poi_fused_add_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp10 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = tmp5 - tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (48, 4), (4, 1))
assert_size_stride(primals_3, (48,), (1,))
assert_size_stride(primals_4, (48, 4), (4, 1))
assert_size_stride(primals_5, (48,), (1,))
assert_size_stride(primals_6, (4, 48), (48, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 48), (48, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 48), (48, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 48),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(192)](buf1, primals_3, 192, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 48), (48, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_4, (4, 48),
(1, 4), 0), out=buf2)
del primals_4
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(192)](buf3, primals_5, 192, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6,
(48, 4), (1, 48), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (48, 1), (1,
48), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_sub_1[grid(16)](buf5, primals_9, buf4, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del buf5
del primals_9
return buf6, primals_1, buf1, buf3, primals_8, primals_6
class DuelingQNetworkNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
"""
super(DuelingQNetworkNew, self).__init__()
self.action_size = action_size
self.seed = torch.manual_seed(seed)
self.fc1_adv = nn.Linear(state_size, fc1_units)
self.fc1_val = nn.Linear(state_size, fc1_units)
self.fc2_adv = nn.Linear(fc1_units, action_size)
self.fc2_val = nn.Linear(fc1_units, 1)
def forward(self, input_0):
primals_2 = self.fc1_adv.weight
primals_3 = self.fc1_adv.bias
primals_4 = self.fc1_val.weight
primals_5 = self.fc1_val.bias
primals_6 = self.fc2_adv.weight
primals_7 = self.fc2_adv.bias
primals_8 = self.fc2_val.weight
primals_9 = self.fc2_val.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
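# A minimal usage sketch (assumes a CUDA device; shapes follow get_inputs()
# from the reference code):
#   net = DuelingQNetworkNew(state_size=4, action_size=4, seed=4).cuda()
#   q = net(torch.rand(4, 4, device='cuda'))  # q.shape == (4, 4)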
| bobiblazeski/navigation | DuelingQNetwork | false | 6,349 | [
"MIT"
] | 1 | bb863b4475a90ff26bede20af647ae4882a0f6fb | https://github.com/bobiblazeski/navigation/tree/bb863b4475a90ff26bede20af647ae4882a0f6fb | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
"""
super().__init__()
self.action_size = action_size
self.seed = torch.manual_seed(seed)
self.fc1_adv = nn.Linear(state_size, fc1_units)
self.fc1_val = nn.Linear(state_size, fc1_units)
self.fc2_adv = nn.Linear(fc1_units, action_size)
self.fc2_val = nn.Linear(fc1_units, 1)
def forward(self, state):
"""Build a network that maps state -> action values."""
batch_size = state.size(0)
adv = F.relu(self.fc1_adv(state))
val = F.relu(self.fc1_val(state))
adv = self.fc2_adv(adv)
x_size = batch_size
val = self.fc2_val(val).expand(x_size, self.action_size)
x = val + adv - adv.mean(1).unsqueeze(1).expand(x_size, self.
action_size)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/oj/coj4vhwkep7b74evfuavwwvnuiwhcjhftm3zs7h4fbzk3cj7adbq.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 460800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3600) % 32
x0 = xindex % 3600
x4 = (xindex // 3600)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vl/cvleeq76qau5psjsb4wvofda7pxb6nnype2zwqgkapdiqogsk5wf.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = (xindex // 30) % 30
x2 = (xindex // 900)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 32, 60, 60), (115712, 3616, 60, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 460800, grid=grid(460800), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1), torch.float32)
buf3 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 115200, grid=grid(115200), stream=stream0)
return (buf2, primals_1, primals_3, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.pool = nn.MaxPool2d(2, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
return x
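# Shape check: the 5x5 valid convolution maps 64x64 -> 60x60 (64 - 5 + 1),
# and the 2x2 stride-2 max pool halves that to 30x30, so get_inputs() below
# yields an output of shape (4, 32, 30, 30).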
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
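# Fused bias-add + ReLU after the extern convolution. The output channel
# stride is padded from 3600 to 3616 (a multiple of 16 elements), presumably
# so each 60x60 channel in buf1 starts at an aligned address.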
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3600 % 32
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, None)
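# 2x2 stride-2 max pool over the padded activations: each output element
# takes the max of the window at offsets 0, 1, 60, 61 (row stride 60 within
# a channel, channel stride 3616) and also stores which of the four inputs
# won as an int8 index for the pooling backward pass.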
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 32, 60, 60), (115712, 3616, 60, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(460800)](buf0, primals_2,
buf1, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(115200)](buf1, buf2,
buf3, 115200, XBLOCK=512, num_warps=8, num_stages=1)
return buf2, primals_1, primals_3, buf1, buf3
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.pool = nn.MaxPool2d(2, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
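# A minimal usage sketch (assumes a CUDA device; input shape from
# get_inputs() in the reference code):
#   net = NetNew().cuda()
#   out = net(torch.rand(4, 1, 64, 64, device='cuda'))  # (4, 32, 30, 30)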
| bongsang/face-landmark | Net | false | 6,350 | [
"MIT"
] | 1 | bc7644480be1ddf8d35c2875d251bc84c00ccaa7 | https://github.com/bongsang/face-landmark/tree/bc7644480be1ddf8d35c2875d251bc84c00ccaa7 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.pool = nn.MaxPool2d(2, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
RankingLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ei/ceinvgcpzgbnvzzkf7c5i3c2enhdduwygzv3zzsjlmaj4v3fbqd2.py
# Topologically Sorted Source Nodes: [ones_like, neg_targets, add, sub_1, relu, ranking_loss_matrix_1, neg_targets_01_sum, truediv, sum_3, mean, mul_2, add_1, sub_2, relu_1, ranking_loss_matrix_10, neg_targets_10_sum, truediv_1, sum_4, mean_1, mul_3, loss], Original ATen: [aten.ones_like, aten.sub, aten.add, aten.relu, aten.mul, aten.sum, aten.div, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# loss => add_2
# mean => mean
# mean_1 => mean_1
# mul_2 => mul_2
# mul_3 => mul_3
# neg_targets => sub
# neg_targets_01_sum => sum_1
# neg_targets_10_sum => sum_2
# ones_like => full_default
# ranking_loss_matrix_1 => mul
# ranking_loss_matrix_10 => mul_1
# relu => relu
# relu_1 => relu_1
# sub_1 => sub_1
# sub_2 => sub_2
# sum_3 => sum_3
# sum_4 => sum_4
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default, %arg1_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 0.1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %view), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %relu), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sum_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_3,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 0.1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %view_1), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %relu_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [0]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %sum_2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_1, [0]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_4,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 33, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (5*r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + (1))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp18 = tl.load(in_ptr0 + (2))
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp22 = tl.load(in_ptr0 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp27 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + (4))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = tl.load(in_ptr0 + (5))
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp41 = tl.load(in_ptr0 + (6))
tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK])
tmp45 = tl.load(in_ptr0 + (7))
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp51 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr0 + (8))
tmp59 = tl.broadcast_to(tmp58, [XBLOCK, RBLOCK])
tmp61 = tl.load(in_ptr0 + (9))
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp65 = tl.load(in_ptr0 + (10))
tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK])
tmp69 = tl.load(in_ptr0 + (11))
tmp70 = tl.broadcast_to(tmp69, [XBLOCK, RBLOCK])
tmp75 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp77 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr0 + (12))
tmp83 = tl.broadcast_to(tmp82, [XBLOCK, RBLOCK])
tmp85 = tl.load(in_ptr0 + (13))
tmp86 = tl.broadcast_to(tmp85, [XBLOCK, RBLOCK])
tmp89 = tl.load(in_ptr0 + (14))
tmp90 = tl.broadcast_to(tmp89, [XBLOCK, RBLOCK])
tmp93 = tl.load(in_ptr0 + (15))
tmp94 = tl.broadcast_to(tmp93, [XBLOCK, RBLOCK])
tmp99 = tl.load(in_ptr0 + (r0), None)
tmp101 = tl.load(in_ptr1 + (r0), None)
tmp106 = tl.load(in_ptr0 + (4 + r0), None)
tmp109 = tl.load(in_ptr0 + (8 + r0), None)
tmp112 = tl.load(in_ptr0 + (12 + r0), None)
tmp116 = tl.load(in_ptr1 + (4 + r0), None)
tmp123 = tl.load(in_ptr1 + (8 + r0), None)
tmp130 = tl.load(in_ptr1 + (12 + r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = 0.1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 - tmp6
tmp8 = tl.full([1, 1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tmp2 * tmp9
tmp13 = tmp1 - tmp12
tmp16 = tmp1 - tmp15
tmp17 = tmp13 + tmp16
tmp20 = tmp1 - tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp1 - tmp23
tmp25 = tmp21 + tmp24
tmp26 = tmp10 / tmp25
tmp28 = tmp1 - tmp27
tmp30 = tmp29 + tmp4
tmp31 = tmp30 - tmp6
tmp32 = triton_helpers.maximum(tmp8, tmp31)
tmp33 = tmp28 * tmp32
tmp36 = tmp1 - tmp35
tmp39 = tmp1 - tmp38
tmp40 = tmp36 + tmp39
tmp43 = tmp1 - tmp42
tmp44 = tmp40 + tmp43
tmp47 = tmp1 - tmp46
tmp48 = tmp44 + tmp47
tmp49 = tmp33 / tmp48
tmp50 = tmp26 + tmp49
tmp52 = tmp1 - tmp51
tmp54 = tmp53 + tmp4
tmp55 = tmp54 - tmp6
tmp56 = triton_helpers.maximum(tmp8, tmp55)
tmp57 = tmp52 * tmp56
tmp60 = tmp1 - tmp59
tmp63 = tmp1 - tmp62
tmp64 = tmp60 + tmp63
tmp67 = tmp1 - tmp66
tmp68 = tmp64 + tmp67
tmp71 = tmp1 - tmp70
tmp72 = tmp68 + tmp71
tmp73 = tmp57 / tmp72
tmp74 = tmp50 + tmp73
tmp76 = tmp1 - tmp75
tmp78 = tmp77 + tmp4
tmp79 = tmp78 - tmp6
tmp80 = triton_helpers.maximum(tmp8, tmp79)
tmp81 = tmp76 * tmp80
tmp84 = tmp1 - tmp83
tmp87 = tmp1 - tmp86
tmp88 = tmp84 + tmp87
tmp91 = tmp1 - tmp90
tmp92 = tmp88 + tmp91
tmp95 = tmp1 - tmp94
tmp96 = tmp92 + tmp95
tmp97 = tmp81 / tmp96
tmp98 = tmp74 + tmp97
tmp100 = tmp1 - tmp99
tmp102 = tmp101 + tmp4
tmp103 = tmp102 - tmp6
tmp104 = triton_helpers.maximum(tmp8, tmp103)
tmp105 = tmp100 * tmp104
tmp107 = tmp1 - tmp106
tmp108 = tmp100 + tmp107
tmp110 = tmp1 - tmp109
tmp111 = tmp108 + tmp110
tmp113 = tmp1 - tmp112
tmp114 = tmp111 + tmp113
tmp115 = tmp105 / tmp114
tmp117 = tmp116 + tmp4
tmp118 = tmp117 - tmp6
tmp119 = triton_helpers.maximum(tmp8, tmp118)
tmp120 = tmp107 * tmp119
tmp121 = tmp120 / tmp114
tmp122 = tmp115 + tmp121
tmp124 = tmp123 + tmp4
tmp125 = tmp124 - tmp6
tmp126 = triton_helpers.maximum(tmp8, tmp125)
tmp127 = tmp110 * tmp126
tmp128 = tmp127 / tmp114
tmp129 = tmp122 + tmp128
tmp131 = tmp130 + tmp4
tmp132 = tmp131 - tmp6
tmp133 = triton_helpers.maximum(tmp8, tmp132)
tmp134 = tmp113 * tmp133
tmp135 = tmp134 / tmp114
tmp136 = tmp129 + tmp135
tmp137 = tl.broadcast_to(tmp98, [XBLOCK, RBLOCK])
tmp139 = tl.sum(tmp137, 1)[:, None]
tmp140 = tl.broadcast_to(tmp136, [XBLOCK, RBLOCK])
tmp142 = tl.sum(tmp140, 1)[:, None]
tmp143 = 4.0
tmp144 = tmp139 / tmp143
tmp145 = 0.5
tmp146 = tmp144 * tmp145
tmp147 = tmp142 / tmp143
tmp148 = tmp147 * tmp145
tmp149 = tmp146 + tmp148
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp149, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [ones_like, neg_targets, add, sub_1, relu, ranking_loss_matrix_1, neg_targets_01_sum, truediv, sum_3, mean, mul_2, add_1, sub_2, relu_1, ranking_loss_matrix_10, neg_targets_10_sum, truediv_1, sum_4, mean_1, mul_3, loss], Original ATen: [aten.ones_like, aten.sub, aten.add, aten.relu, aten.mul, aten.sum, aten.div, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0.run(buf4, arg1_1, arg0_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from abc import abstractmethod
import torch.utils.data.dataloader
import torch.nn.functional as F
import torch.nn as nn
import torch.nn
import torch.optim.optimizer
class SimilarityLoss(nn.Module):
def __init__(self):
super(SimilarityLoss, self).__init__()
@abstractmethod
def forward(self, inputs, targets):
pass
class RankingLoss(SimilarityLoss):
"""
Triplet ranking loss between pair similarities and pair labels.
"""
def __init__(self, margin=0.1, direction_weights=[0.5, 0.5]):
super(RankingLoss, self).__init__()
self.margin = margin
self.direction_weights = direction_weights
def forward(self, inputs, targets):
n = inputs.shape[0]
neg_targets = torch.ones_like(targets) - targets
ranking_loss_matrix_01 = neg_targets * F.relu(self.margin + inputs -
torch.diag(inputs).view(n, 1))
ranking_loss_matrix_10 = neg_targets * F.relu(self.margin + inputs -
torch.diag(inputs).view(1, n))
neg_targets_01_sum = torch.sum(neg_targets, dim=1)
neg_targets_10_sum = torch.sum(neg_targets, dim=0)
loss = self.direction_weights[0] * torch.mean(torch.sum(
ranking_loss_matrix_01 / neg_targets_01_sum, dim=1)
) + self.direction_weights[1] * torch.mean(torch.sum(
ranking_loss_matrix_10 / neg_targets_10_sum, dim=0))
return loss
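# With s = inputs, y = targets, m = margin and n = 1 - y, every negative
# pair (i, j) is pushed at least m below the diagonal (positive) score:
#   L = w0 * mean_i sum_j n_ij * relu(m + s_ij - s_ii) / neg_targets_01_sum
#     + w1 * mean_j sum_i n_ij * relu(m + s_ij - s_jj) / neg_targets_10_sum
# (both normalizers broadcast over the last dimension, as in the code above).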
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from abc import abstractmethod
import torch.utils.data.dataloader
import torch.nn as nn
import torch.nn
import torch.optim.optimizer
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
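# The entire 4x4 loss collapses into a single persistent-reduction program
# over r0 = 0..3: in_ptr0 holds targets, in_ptr1 holds inputs, and
# in_ptr1 + 5*r0 reads the diagonal s_rr (stride n + 1 = 5). tmp98 gathers
# the 0->1 direction terms for row r0, tmp136 the 1->0 terms for column r0,
# and the final store fuses both means with the 0.5 / 0.5 direction weights.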
@triton.jit
def triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + 5 * r0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.load(in_ptr0 + 1)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp18 = tl.load(in_ptr0 + 2)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp22 = tl.load(in_ptr0 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp27 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + 4)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = tl.load(in_ptr0 + 5)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp41 = tl.load(in_ptr0 + 6)
tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK])
tmp45 = tl.load(in_ptr0 + 7)
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp51 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr0 + 8)
tmp59 = tl.broadcast_to(tmp58, [XBLOCK, RBLOCK])
tmp61 = tl.load(in_ptr0 + 9)
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp65 = tl.load(in_ptr0 + 10)
tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK])
tmp69 = tl.load(in_ptr0 + 11)
tmp70 = tl.broadcast_to(tmp69, [XBLOCK, RBLOCK])
tmp75 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp77 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr0 + 12)
tmp83 = tl.broadcast_to(tmp82, [XBLOCK, RBLOCK])
tmp85 = tl.load(in_ptr0 + 13)
tmp86 = tl.broadcast_to(tmp85, [XBLOCK, RBLOCK])
tmp89 = tl.load(in_ptr0 + 14)
tmp90 = tl.broadcast_to(tmp89, [XBLOCK, RBLOCK])
tmp93 = tl.load(in_ptr0 + 15)
tmp94 = tl.broadcast_to(tmp93, [XBLOCK, RBLOCK])
tmp99 = tl.load(in_ptr0 + r0, None)
tmp101 = tl.load(in_ptr1 + r0, None)
tmp106 = tl.load(in_ptr0 + (4 + r0), None)
tmp109 = tl.load(in_ptr0 + (8 + r0), None)
tmp112 = tl.load(in_ptr0 + (12 + r0), None)
tmp116 = tl.load(in_ptr1 + (4 + r0), None)
tmp123 = tl.load(in_ptr1 + (8 + r0), None)
tmp130 = tl.load(in_ptr1 + (12 + r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = 0.1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 - tmp6
tmp8 = tl.full([1, 1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tmp2 * tmp9
tmp13 = tmp1 - tmp12
tmp16 = tmp1 - tmp15
tmp17 = tmp13 + tmp16
tmp20 = tmp1 - tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp1 - tmp23
tmp25 = tmp21 + tmp24
tmp26 = tmp10 / tmp25
tmp28 = tmp1 - tmp27
tmp30 = tmp29 + tmp4
tmp31 = tmp30 - tmp6
tmp32 = triton_helpers.maximum(tmp8, tmp31)
tmp33 = tmp28 * tmp32
tmp36 = tmp1 - tmp35
tmp39 = tmp1 - tmp38
tmp40 = tmp36 + tmp39
tmp43 = tmp1 - tmp42
tmp44 = tmp40 + tmp43
tmp47 = tmp1 - tmp46
tmp48 = tmp44 + tmp47
tmp49 = tmp33 / tmp48
tmp50 = tmp26 + tmp49
tmp52 = tmp1 - tmp51
tmp54 = tmp53 + tmp4
tmp55 = tmp54 - tmp6
tmp56 = triton_helpers.maximum(tmp8, tmp55)
tmp57 = tmp52 * tmp56
tmp60 = tmp1 - tmp59
tmp63 = tmp1 - tmp62
tmp64 = tmp60 + tmp63
tmp67 = tmp1 - tmp66
tmp68 = tmp64 + tmp67
tmp71 = tmp1 - tmp70
tmp72 = tmp68 + tmp71
tmp73 = tmp57 / tmp72
tmp74 = tmp50 + tmp73
tmp76 = tmp1 - tmp75
tmp78 = tmp77 + tmp4
tmp79 = tmp78 - tmp6
tmp80 = triton_helpers.maximum(tmp8, tmp79)
tmp81 = tmp76 * tmp80
tmp84 = tmp1 - tmp83
tmp87 = tmp1 - tmp86
tmp88 = tmp84 + tmp87
tmp91 = tmp1 - tmp90
tmp92 = tmp88 + tmp91
tmp95 = tmp1 - tmp94
tmp96 = tmp92 + tmp95
tmp97 = tmp81 / tmp96
tmp98 = tmp74 + tmp97
tmp100 = tmp1 - tmp99
tmp102 = tmp101 + tmp4
tmp103 = tmp102 - tmp6
tmp104 = triton_helpers.maximum(tmp8, tmp103)
tmp105 = tmp100 * tmp104
tmp107 = tmp1 - tmp106
tmp108 = tmp100 + tmp107
tmp110 = tmp1 - tmp109
tmp111 = tmp108 + tmp110
tmp113 = tmp1 - tmp112
tmp114 = tmp111 + tmp113
tmp115 = tmp105 / tmp114
tmp117 = tmp116 + tmp4
tmp118 = tmp117 - tmp6
tmp119 = triton_helpers.maximum(tmp8, tmp118)
tmp120 = tmp107 * tmp119
tmp121 = tmp120 / tmp114
tmp122 = tmp115 + tmp121
tmp124 = tmp123 + tmp4
tmp125 = tmp124 - tmp6
tmp126 = triton_helpers.maximum(tmp8, tmp125)
tmp127 = tmp110 * tmp126
tmp128 = tmp127 / tmp114
tmp129 = tmp122 + tmp128
tmp131 = tmp130 + tmp4
tmp132 = tmp131 - tmp6
tmp133 = triton_helpers.maximum(tmp8, tmp132)
tmp134 = tmp113 * tmp133
tmp135 = tmp134 / tmp114
tmp136 = tmp129 + tmp135
tmp137 = tl.broadcast_to(tmp98, [XBLOCK, RBLOCK])
tmp139 = tl.sum(tmp137, 1)[:, None]
tmp140 = tl.broadcast_to(tmp136, [XBLOCK, RBLOCK])
tmp142 = tl.sum(tmp140, 1)[:, None]
tmp143 = 4.0
tmp144 = tmp139 / tmp143
tmp145 = 0.5
tmp146 = tmp144 * tmp145
tmp147 = tmp142 / tmp143
tmp148 = tmp147 * tmp145
tmp149 = tmp146 + tmp148
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp149, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_ones_like_relu_sub_sum_0[grid(1)](
buf4, arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4,
class SimilarityLoss(nn.Module):
def __init__(self):
super(SimilarityLoss, self).__init__()
@abstractmethod
def forward(self, inputs, targets):
pass
class RankingLossNew(SimilarityLoss):
"""
Triplet ranking loss between pair similarities and pair labels.
"""
def __init__(self, margin=0.1, direction_weights=[0.5, 0.5]):
super(RankingLossNew, self).__init__()
self.margin = margin
self.direction_weights = direction_weights
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
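# A minimal usage sketch (assumes a CUDA device; sims and labels are
# hypothetical (4, 4) tensors, with labels a 0/1 pair-label matrix):
#   loss = RankingLossNew()(sims.cuda(), labels.cuda())  # 0-d tensor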
| bogdankostic/flair | RankingLoss | false | 6,351 | [
"MIT"
] | 1 | 8cf03eab19512e94c1bcb4a30409bb065d37fe25 | https://github.com/bogdankostic/flair/tree/8cf03eab19512e94c1bcb4a30409bb065d37fe25 | import torch
from abc import abstractmethod
import torch.utils.data.dataloader
import torch.nn.functional as F
import torch.nn as nn
import torch.nn
import torch.optim.optimizer
class SimilarityLoss(nn.Module):
def __init__(self):
super().__init__()
@abstractmethod
def forward(self, inputs, targets):
pass
class Model(SimilarityLoss):
"""
Triplet ranking loss between pair similarities and pair labels.
"""
def __init__(self, margin=0.1, direction_weights=[0.5, 0.5]):
super().__init__()
self.margin = margin
self.direction_weights = direction_weights
def forward(self, inputs, targets):
n = inputs.shape[0]
neg_targets = torch.ones_like(targets) - targets
ranking_loss_matrix_01 = neg_targets * F.relu(self.margin + inputs -
torch.diag(inputs).view(n, 1))
ranking_loss_matrix_10 = neg_targets * F.relu(self.margin + inputs -
torch.diag(inputs).view(1, n))
neg_targets_01_sum = torch.sum(neg_targets, dim=1)
neg_targets_10_sum = torch.sum(neg_targets, dim=0)
loss = self.direction_weights[0] * torch.mean(torch.sum(
ranking_loss_matrix_01 / neg_targets_01_sum, dim=1)
) + self.direction_weights[1] * torch.mean(torch.sum(
ranking_loss_matrix_10 / neg_targets_10_sum, dim=0))
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
AttnModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/se/cselmb52zw6k76vjfnlz46twqd7xtmz5eapmkzelmdbzmrywqb6z.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%expand, %expand_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x4 = (xindex // 32)
x1 = (xindex // 8) % 4
x3 = (xindex // 128)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + (16*x3) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x5), tmp10, xmask)
''', device_str='cuda')
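# Reference sketch (assumption, not part of the Inductor output): the fused cat
# kernel above materializes torch.cat((qe, ke), dim=-1) from AttnModel.forward,
# where qe and ke are the query and key broadcast against each other.
def _cat_reference(q, k):
    # q: (B, O, H), k: (B, I, H) -> (B, O, I, 2 * H)
    qe = q.unsqueeze(2).expand(-1, -1, k.size(1), -1)
    ke = k.unsqueeze(1).expand(-1, q.size(1), -1, -1)
    return torch.cat((qe, ke), dim=-1)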
# kernel path: runs/run_shard_4/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# hidden => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
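# Reference sketch (assumption): the kernel above fuses the bias add of the
# score MLP's input layer with its tanh activation, applied in place on the
# matmul output, i.e. torch.tanh(x + bias).
def _tanh_reference(x, bias):
    return torch.tanh(x + bias)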
# kernel path: runs/run_shard_4/inductor_cache/hg/chg3iq6bscxmmxv5f7tuzgwycb4mgrimwfhv2nauw5rj4tt5cmv2.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zu/czuvep3dmpmqmhiiliwubh4ghdt2qr27va67sszkua7trziinwov.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
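# Reference sketch (assumption): triton_poi_fused__softmax_2 and _3 together
# implement a numerically stable softmax over the last dimension in two passes:
# the first computes exp(x - rowmax), the second divides by the row sum.
def _softmax_reference(x):
    x = x - x.max(dim=-1, keepdim=True).values
    e = x.exp()
    return e / e.sum(dim=-1, keepdim=True)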
# kernel path: runs/run_shard_4/inductor_cache/hb/chbekjydkfht5hbhbwmu672irpklgukrdgntmgqh7ge2e7c7wvow.py
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# output_2 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_5, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_5, %mul), kwargs = {})
triton_poi_fused_leaky_relu_4 = async_compile.triton('triton_poi_fused_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
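# Reference sketch (assumption): the kernel above fuses the fw-linear bias add
# with leaky_relu (negative slope 0.01), and additionally stores the `> 0`
# mask (buf9) that the autograd backward pass reuses.
def _leaky_relu_reference(x, bias, negative_slope=0.01):
    y = x + bias
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)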
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 512, grid=grid(512), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_6
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, primals_1, out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_4.run(buf8, primals_8, buf9, buf10, 64, grid=grid(64), stream=stream0)
del buf8
del primals_8
return (buf10, buf6, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2, buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf9, primals_7, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super(MLP, self).__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
class AttnModel(nn.Module):
"""
Attention Model
:param dim: int, size of hidden vector
:param dropout: float, dropout rate of attention model
"""
def __init__(self, dim, dropout):
super(AttnModel, self).__init__()
self.score = MLP(dim * 2, 1, dim, 0, n_layers=2, act=torch.tanh)
self.fw = nn.Linear(dim, dim)
self.dropout = nn.Dropout(p=dropout)
return
def forward(self, q, k, v=None, mask=None):
"""
        :param q: Tensor of (batch_size, hidden_size) or (batch_size, out_size, hidden_size), query; out_size is treated as 1 when the out_size dim is omitted
:param k: Tensor of (batch_size, in_size, hidden_size), key
:param v: Tensor of (batch_size, in_size, hidden_size), value, default = None, means v = k
:param mask: Tensor of (batch_size, in_size), key/value mask, where 0 means data and 1 means pad, default = None, means zero matrix
:returns: (output, attn)
output: Tensor of (batch_size, hidden_size) or (batch_size, out_size, hidden_size), attention output, shape according to q
attn: Tensor of (batch_size, in_size) or (batch_size, out_size, in_size), attention weight, shape according to q
"""
if v is None:
v = k
q_dim = q.dim()
if q_dim == 2:
q = q.unsqueeze(1)
output_size = q.size(1)
input_size = k.size(1)
qe = q.unsqueeze(2).expand(-1, -1, input_size, -1)
ke = k.unsqueeze(1).expand(-1, output_size, -1, -1)
score = self.score(torch.cat((qe, ke), dim=-1)).squeeze(-1)
if mask is not None:
            score.masked_fill_(mask.unsqueeze(1).expand(-1, output_size, -1),
                -float('inf'))
attn = F.softmax(score, dim=-1)
output = torch.bmm(attn, v)
if q_dim == 2:
output = output.squeeze(1)
attn = attn.squeeze(1)
output = F.leaky_relu(self.fw(self.dropout(output)))
return output, attn
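def _attn_example():
    # Usage sketch (assumption): shapes follow the forward docstring; eval()
    # disables the output dropout so the result is deterministic.
    model = AttnModel(dim=4, dropout=0.5).eval()
    q, k = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    output, attn = model(q, k)
    return output.shape, attn.shape  # (4, 4, 4), (4, 4, 4)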
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x4 = xindex // 32
x1 = xindex // 8 % 4
x3 = xindex // 128
x5 = xindex
tmp0 = x0
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp9 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x5, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_2, primals_1, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(256)](buf2, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_6
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
extern_kernels.bmm(buf6, primals_1, out=buf7)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_4[grid(64)](buf8, primals_8, buf9,
buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf8
del primals_8
    return (buf10, buf6, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2,
        buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf9, primals_7,
        reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), primals_5)
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super(MLP, self).__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
class AttnModelNew(nn.Module):
"""
Attention Model
:param dim: int, size of hidden vector
:param dropout: float, dropout rate of attention model
"""
def __init__(self, dim, dropout):
super(AttnModelNew, self).__init__()
self.score = MLP(dim * 2, 1, dim, 0, n_layers=2, act=torch.tanh)
self.fw = nn.Linear(dim, dim)
self.dropout = nn.Dropout(p=dropout)
return
def forward(self, input_0, input_1):
primals_3 = self.score.l_in.weight
primals_4 = self.score.l_in.bias
primals_5 = self.score.l_out.weight
primals_6 = self.score.l_out.bias
primals_7 = self.fw.weight
primals_8 = self.fw.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
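# Usage sketch (assumption): drop-in replacement for AttnModel on CUDA inputs
# of shape (4, 4, 4); dropout is not traced into the compiled graph, so this
# matches eval-mode behavior.
#
#   model = AttnModelNew(dim=4, dropout=0.5).cuda()
#   out, attn = model(torch.rand(4, 4, 4, device='cuda'),
#                     torch.rand(4, 4, 4, device='cuda'))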
| bigdata-ustc/DisenQNet | AttnModel | false | 6,352 | ["MIT"] | 1 | 908fadeb9b8d278450213deff70205703bd91da6 | https://github.com/bigdata-ustc/DisenQNet/tree/908fadeb9b8d278450213deff70205703bd91da6 | import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super().__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
class Model(nn.Module):
"""
Attention Model
:param dim: int, size of hidden vector
:param dropout: float, dropout rate of attention model
"""
def __init__(self, dim, dropout):
super().__init__()
self.score = MLP(dim * 2, 1, dim, 0, n_layers=2, act=torch.tanh)
self.fw = nn.Linear(dim, dim)
self.dropout = nn.Dropout(p=dropout)
return
def forward(self, q, k, v=None, mask=None):
"""
        :param q: Tensor of (batch_size, hidden_size) or (batch_size, out_size, hidden_size), query; out_size is treated as 1 when the out_size dim is omitted
:param k: Tensor of (batch_size, in_size, hidden_size), key
:param v: Tensor of (batch_size, in_size, hidden_size), value, default = None, means v = k
:param mask: Tensor of (batch_size, in_size), key/value mask, where 0 means data and 1 means pad, default = None, means zero matrix
:returns: (output, attn)
output: Tensor of (batch_size, hidden_size) or (batch_size, out_size, hidden_size), attention output, shape according to q
attn: Tensor of (batch_size, in_size) or (batch_size, out_size, in_size), attention weight, shape according to q
"""
if v is None:
v = k
q_dim = q.dim()
if q_dim == 2:
q = q.unsqueeze(1)
output_size = q.size(1)
input_size = k.size(1)
qe = q.unsqueeze(2).expand(-1, -1, input_size, -1)
ke = k.unsqueeze(1).expand(-1, output_size, -1, -1)
score = self.score(torch.cat((qe, ke), dim=-1)).squeeze(-1)
if mask is not None:
            score.masked_fill_(mask.unsqueeze(1).expand(-1, output_size, -1),
                -float('inf'))
attn = F.softmax(score, dim=-1)
output = torch.bmm(attn, v)
if q_dim == 2:
output = output.squeeze(1)
attn = attn.squeeze(1)
output = F.leaky_relu(self.fw(self.dropout(output)))
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 0.5]
|
QNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/u4/cu4w5mjboiqwklmwybc4xc4mprxqyu3y47th4pg5dr7zr6vxzsy4.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
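# Reference sketch (assumption): eager equivalent of the fused kernel above,
# which applies the fc1 bias and ReLU in place and also materializes the
# `<= 0` mask consumed by threshold_backward in the backward pass.
def _relu_reference(x, bias):
    y = torch.relu(x + bias)
    return y, y <= 0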
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (48, 4), (4, 1))
assert_size_stride(primals_2, (48, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 48), (48, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 48), (48, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 48), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 48), (768, 192, 48, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 48), (768, 192, 48, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 3072, grid=grid(3072), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 48), (48, 1), 0), reinterpret_tensor(primals_4, (48, 4), (1, 48), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 48), (48, 1), 0), primals_4, buf3, )
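# Reference sketch (assumption): the compiled graph above is equivalent to
# this eager two-layer forward; a hypothetical helper, not produced by Inductor.
def _qnetwork_reference(state, w1, b1, w2, b2):
    import torch.nn.functional as F
    return F.linear(F.relu(F.linear(state, w1, b1)), w2, b2)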
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((48, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 48), (48, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
"""
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, action_size)
def forward(self, state):
"""Build a network that maps state -> action values."""
x = F.relu(self.fc1(state))
return self.fc2(x)
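def _qnetwork_example():
    # Usage sketch (assumption): nn.Linear broadcasts over leading dims, so a
    # (4, 4, 4, 4) state batch maps to (4, 4, 4, 4) action values.
    net = QNetwork(state_size=4, action_size=4, seed=4)
    return net(torch.rand(4, 4, 4, 4)).shape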
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (48, 4), (4, 1))
assert_size_stride(primals_2, (48,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 48), (48, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 48), (48, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 48), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 48), (768, 192, 48, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 48), (768, 192, 48, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(3072)](buf1,
primals_2, buf3, 3072, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 48),
(48, 1), 0), reinterpret_tensor(primals_4, (48, 4), (1, 48), 0),
alpha=1, beta=1, out=buf2)
del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 48), (48, 1), 0), primals_4, buf3)
class QNetworkNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
"""
super(QNetworkNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| bobiblazeski/navigation | QNetwork | false | 6,353 | ["MIT"] | 1 | bb863b4475a90ff26bede20af647ae4882a0f6fb | https://github.com/bobiblazeski/navigation/tree/bb863b4475a90ff26bede20af647ae4882a0f6fb | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, action_size)
def forward(self, state):
"""Build a network that maps state -> action values."""
x = F.relu(self.fc1(state))
return self.fc2(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
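# Reference sketch (assumption): the kernel above computes the per-row
# LayerNorm statistics over the last dimension: the mean and rsqrt(var + eps).
def _ln_stats_reference(x, eps=1e-05):
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return mean, torch.rsqrt(var + eps)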
# kernel path: runs/run_shard_4/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
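# Reference sketch (assumption): the second LayerNorm kernel applies the
# precomputed statistics and the affine parameters:
# (x - mean) * rsqrt(var + eps) * weight + bias.
def _ln_apply_reference(x, mean, rstd, weight, bias):
    return (x - mean) * rstd * weight + bias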
# kernel path: runs/run_shard_4/inductor_cache/3r/c3rfy3ljjc2bfodnr5gm65jr7ew6v6kno6w6jzahlupuqxbpvfkw.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/aw/cawvwx3nv7ipnpnf2hcgwz5usu7vsw5yynj5ofrunhktjwqff5vq.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
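# Reference sketch (assumption): the two clone kernels above gather the query
# and key slices out of a packed qkv projection (stride 12 = 3 * dim per
# token, with q at offset 0 and k at offset 4) and lay them out contiguously
# for the batched matmul. A hypothetical eager equivalent, assuming a fused
# nn.Linear(dim, 3 * dim):
def _split_qk_reference(qkv):
    # qkv: (B, N, 3 * H) -> q, k each (B, N, H)
    q, k, _v = qkv.chunk(3, dim=-1)
    return q.contiguous(), k.contiguous()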
# kernel path: runs/run_shard_4/inductor_cache/p5/cp5wuljbdcz2dl2xvl4imkn5wmtmrnbb7mnld5glztiqavldlheh.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
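# NOTE (editorial): second softmax pass; each exponentiated value is divided
# by its row sum, so the two kernels together implement attn.softmax(dim=-1).
# Sketch: probs = numer / numer.sum(dim=-1, keepdim=True)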
# kernel path: runs/run_shard_4/inductor_cache/vv/cvvhis67uzj3m3ebbd4sgghaemqhihabasphltk5wytqdd6fe74t.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_1 => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5y/c5yhyv7emyc7i2ozpvns6tsiqcvdzktqqpohy4sedfe7aihkojch.py
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => var_mean_1
# x_1 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
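# NOTE (editorial): this kernel fuses the residual add h = x + attn_out +
# proj_bias with the LayerNorm statistics: out_ptr0 receives the per-row mean
# and out_ptr1 the biased variance (correction=0) over the last dim of size 4.
# Rough sketch:
#   h = x + attn_out + proj_bias
#   mean = h.mean(-1, keepdim=True)
#   var = h.var(-1, unbiased=False, keepdim=True)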
# kernel path: runs/run_shard_4/inductor_cache/xj/cxjpr2ute76xkk7edg7qlvolks2ggx2xwbrttteralhmvd2xsktw.py
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2
# x_1 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {})
triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
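# NOTE (editorial): companion kernel that applies the normalization,
# (h - mean) * rsqrt(var + 1e-05) * weight + bias, recomputing the residual
# sum h elementwise rather than re-reading a materialized tensor.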
# kernel path: runs/run_shard_4/inductor_cache/jr/cjr3mjuwemgklde3zy7lrqxunugyqui3f2hy6zj6ufkp5mfj3lry.py
# Topologically Sorted Source Nodes: [mul_1, wrapped_sqrt, pow_1, mul_2, add_1, mul_3, tanh, add_2, x_5], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add_1 => add_6
# add_2 => add_7
# mul_1 => mul_5
# mul_2 => mul_6
# mul_3 => mul_7
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# x_5 => mul_8
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_13, 3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_13, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add_6), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_7,), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_7), kwargs = {})
triton_poi_fused_add_mul_pow_sqrt_tanh_10 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
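# NOTE (editorial): this is the tanh approximation of GELU,
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))), matching the
# custom GELU module below; 0.7978845608028654 == sqrt(2/pi). On PyTorch
# builds that support it (assumption about the runtime version), a one-line
# equivalent is:
#   y = torch.nn.functional.gelu(x, approximate='tanh')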
# kernel path: runs/run_shard_4/inductor_cache/pu/cpuql3oz4hmaygynopg7lq7xhfiv7hr7pr4vyzhfpmw34jymdp7q.py
# Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add_2
# x_3 => add_3
# x_9 => add_8
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_15), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
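# NOTE (editorial): final fused kernel, the second residual connection. It
# recomputes h = x + attn_out + proj_bias and adds the MLP branch in place:
# in_out_ptr0 holds the fc2 matmul result, so out = h + (mlp_mm + fc2_bias).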
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16, ), (1, ))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, wrapped_sqrt, pow_1, mul_2, add_1, mul_3, tanh, add_2, x_5], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
triton_poi_fused_add_mul_pow_sqrt_tanh_10.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf19, primals_3, buf12, primals_6, primals_12, 64, grid=grid(64), stream=stream0)
del primals_12
return (buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, )
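# NOTE (editorial): `call` stitches together the whole Block forward:
# LayerNorm (kernels 0/1), fused QKV via an extern mm, attention bmm plus the
# two-pass softmax (kernels 4/5), output projection, residual + second
# LayerNorm (kernels 8/9), tanh-GELU MLP (kernel 10), and the closing residual
# add (kernel 11). The extra tensors in the return tuple appear to be saved
# for the autograd backward graph.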
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
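# NOTE (editorial): drop_path implements stochastic depth: with probability
# drop_prob a sample's entire residual branch is zeroed, and survivors are
# rescaled by 1/keep_prob so the expected value is unchanged. Illustrative
# numbers (hypothetical): with drop_prob=0.1, about 10% of the per-sample
# masks floor to 0 and the rest pass x / 0.9 through.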
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class DropPath(nn.Module):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0,
proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
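# NOTE (editorial): with dim=4 and num_heads=4 (the get_init_inputs() values
# below) each head has size 1, so scale == 1 ** -0.5 == 1.0, which is why the
# generated softmax kernel above multiplies by 1.0. Shape sketch for the
# sample input: x (4, 4, 4) -> qkv (3, 4, 4, 4, 1) -> attn (4, 4, 4, 4).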
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=
0.0, attn_drop=0.0, drop_path=0.0, act_layer=GELU, norm_layer=nn.
LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio
), act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_10(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16,), (1,))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
del buf7
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_add_mul_pow_sqrt_tanh_10[grid(256)](buf16, buf17,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4
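# NOTE (editorial): the first element of this tuple is the Block output
# (buf19); the remaining tensors (inputs, normalized activations, softmax
# probabilities, reinterpreted weight views) are presumably kept alive for
# reuse by the AOT backward graph rather than being recomputed.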
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class DropPath(nn.Module):
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0,
proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class BlockNew(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=
0.0, attn_drop=0.0, drop_path=0.0, act_layer=GELU, norm_layer=nn.
LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio
), act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
def forward(self, input_0):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_4 = self.attn.qkv.weight
primals_5 = self.attn.proj.weight
primals_6 = self.attn.proj.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.mlp.fc1.weight
primals_10 = self.mlp.fc1.bias
primals_11 = self.mlp.fc2.weight
primals_12 = self.mlp.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
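# NOTE (editorial): BlockNew keeps the standard nn.Module parameters but
# routes forward through the compiled `call` above. Hedged usage sketch
# (assumes a CUDA device and the helper sizes below):
#   m = BlockNew(dim=4, num_heads=4).cuda()
#   y = m(torch.rand(4, 4, 4, device='cuda'))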
| au55555/classification-pytorch | Block | false | 6,354 | ["MIT"] | 1 | 1937599ae6e688ed7af7470f69964fb6f97241c4 | https://github.com/au55555/classification-pytorch/tree/1937599ae6e688ed7af7470f69964fb6f97241c4 | import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class GELU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class DropPath(nn.Module):
def __init__(self, drop_prob=None):
super().__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0,
proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
class Model(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=
0.0, attn_drop=0.0, drop_path=0.0, act_layer=GELU, norm_layer=nn.
LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio
), act_layer=act_layer, drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MaskedLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vn/cvnovvgrk47qviqcskah6z34pushumedjq2l7peskzvjzbpu2sis.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# out_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %primals_4), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf1, primals_2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.cuda
from torch.nn.functional import *
class MaskedLinear(torch.nn.Linear):
def forward(self, x, mask):
out = super().forward(x)
if mask.is_floating_point():
out = out * mask
else:
out = out * mask.type_as(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
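# A minimal usage sketch (added for illustration, not part of the original
# module): the mask simply gates the linear output element-wise, so the
# result can be checked against plain tensor algebra.
def example_masked_forward():
    layer = MaskedLinear(in_features=4, out_features=4)
    x = torch.rand(4, 4, 4, 4)
    mask = torch.rand(4, 4, 4, 4) > 0.5  # boolean masks are cast via type_as
    out = layer(x, mask)
    # Equivalent eager computation: (x @ W.T + b) * mask
    ref = (x @ layer.weight.t() + layer.bias) * mask.float()
    return torch.allclose(out, ref)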
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.cuda
from torch.nn.functional import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](buf1, primals_2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class MaskedLinearNew(torch.nn.Linear):
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
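# Illustrative equivalence check (a sketch that assumes a CUDA device is
# available; the input shapes are fixed by the specialized kernel above):
def check_compiled_matches_eager():
    torch.manual_seed(0)
    m = MaskedLinearNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    # The compiled path should agree with the eager formula (x @ W.T + b) * mask.
    ref = (x @ m.weight.t() + m.bias) * mask
    return torch.allclose(m(x, mask), ref)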
| bratao/DeepSpeed | MaskedLinear | false | 6,355 | [
"MIT"
] | 1 | c50d8955e942e5e26cf81835d59ec3f20ef8540d | https://github.com/bratao/DeepSpeed/tree/c50d8955e942e5e26cf81835d59ec3f20ef8540d | import torch
import torch.cuda
from torch.nn.functional import *
class Model(torch.nn.Linear):
def forward(self, x, mask):
out = super().forward(x)
if mask.is_floating_point():
out = out * mask
else:
out = out * mask.type_as(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CartpoleDQN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/d4/cd4rmmsd46lha6l4ut5a6nmc4wf52z3irm2fxiokbhgjhmlh75hj.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 12), (12, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 768, grid=grid(768), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 12), (12, 1), 0), reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 12), (12, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CartpoleDQN(nn.Module):
def __init__(self, state_space_dim, action_space_dim, hidden=12):
super(CartpoleDQN, self).__init__()
self.hidden = hidden
self.fc1 = nn.Linear(state_space_dim, hidden)
self.fc2 = nn.Linear(hidden, action_space_dim)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_space_dim': 4, 'action_space_dim': 4}]
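# A minimal usage sketch (illustrative only): the network maps a state to one
# Q-value per action, and a greedy policy takes the argmax over that axis.
def greedy_action(model, state):
    with torch.no_grad():
        q_values = model(state)  # shape: (..., action_space_dim)
    return int(q_values.argmax(dim=-1).reshape(-1)[0])

# e.g. greedy_action(CartpoleDQN(4, 2), torch.rand(1, 4))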
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
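    # Fuses the bias add with ReLU and, in the same pass, writes the
    # (activation <= 0) boolean mask that threshold_backward uses to zero
    # the incoming gradient during the backward pass.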
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 12), (12, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(768)](buf1,
primals_2, buf3, 768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 12),
(12, 1), 0), reinterpret_tensor(primals_4, (12, 4), (1, 12), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 12), (12, 1), 0), primals_4, buf3
class CartpoleDQNNew(nn.Module):
def __init__(self, state_space_dim, action_space_dim, hidden=12):
super(CartpoleDQNNew, self).__init__()
self.hidden = hidden
self.fc1 = nn.Linear(state_space_dim, hidden)
self.fc2 = nn.Linear(hidden, action_space_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| breno-aberle/rl-pong-project | CartpoleDQN | false | 6,356 | [
"MIT"
] | 1 | 9dc0d12e4bbcdb2905d46f66e84fac6d70c7831d | https://github.com/breno-aberle/rl-pong-project/tree/9dc0d12e4bbcdb2905d46f66e84fac6d70c7831d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_space_dim, action_space_dim, hidden=12):
super().__init__()
self.hidden = hidden
self.fc1 = nn.Linear(state_space_dim, hidden)
self.fc2 = nn.Linear(hidden, action_space_dim)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
AvgPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/av/cavrjo5qex5lv5rbdmd7anyz5qi77ed62nlfzgynnerjoe6rjudf.py
# Topologically Sorted Source Nodes: [sum_1, sum_2, kernel_out], Original ATen: [aten.sum, aten.mul]
# Source node to ATen node mapping:
# kernel_out => mul
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [2]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [2]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 0.0625), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tmp22 + tmp29
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(in_out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sum_1, sum_2, kernel_out], Original ATen: [aten.sum, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sum_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch as th
class AvgPool2d(Module):
"""
This class is the beginning of an exact Python port of the torch.nn.AvgPool2d
module. Because PySyft cannot hook into layers that are implemented in C++,
our special functionalities (such as encrypted computation) do not work with
torch.nn.AvgPool2d, so we must provide Python ports of every layer type we
seek to use.
Note that this module has been tested to ensure that it produces exactly the
same output values, in the same order, as the main module. However, there is
often some rounding error of unknown origin, usually less than 1e-6 in
magnitude.
This module has not yet been tested on GPUs but should work out of the box.
"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
"""For information on the constructor arguments, please see PyTorch's
documentation in torch.nn.AvgPool2d"""
super().__init__()
assert padding == 0
assert ceil_mode is False
assert count_include_pad is True
assert divisor_override is None
if stride is None:
stride = kernel_size
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
self._one_over_kernel_size = 1 / (self.kernel_size * self.kernel_size)
def forward(self, data):
batch_size, out_channels, rows, cols = data.shape
kernel_results = list()
for i in range(0, rows - self.kernel_size + 1, self.stride):
for j in range(0, cols - self.kernel_size + 1, self.stride):
kernel_out = data[:, :, i:i + self.kernel_size, j:j + self.
kernel_size].sum(2).sum(2) * self._one_over_kernel_size
kernel_results.append(kernel_out.unsqueeze(2))
pred = th.cat(kernel_results, axis=2).view(batch_size, out_channels,
int(rows / self.stride), int(cols / self.stride))
return pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
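# Illustrative check of the docstring's accuracy claim (a sketch, not part of
# the original module): the port should match torch.nn.AvgPool2d to ~1e-6.
def check_against_builtin():
    data = torch.rand(4, 4, 8, 8)
    ours = AvgPool2d(kernel_size=4, stride=4)(data)
    ref = torch.nn.AvgPool2d(kernel_size=4, stride=4)(data)
    return (ours - ref).abs().max().item()  # expected to be below ~1e-6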
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
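    # The sixteen loads below gather one full 4x4 pooling window per
    # (batch, channel) slot; summing them and scaling by 1/16 (0.0625)
    # reproduces the average pool for kernel_size=4 on a 4x4 input.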
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 + tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tmp22 + tmp29
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(in_out_ptr0 + x0, tmp32, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0),
class AvgPool2dNew(Module):
"""
This class is the beginning of an exact Python port of the torch.nn.AvgPool2d
module. Because PySyft cannot hook into layers that are implemented in C++,
our special functionalities (such as encrypted computation) do not work with
torch.nn.AvgPool2d, so we must provide Python ports of every layer type we
seek to use.
Note that this module has been tested to ensure that it produces exactly the
same output values, in the same order, as the main module. However, there is
often some rounding error of unknown origin, usually less than 1e-6 in
magnitude.
This module has not yet been tested on GPUs but should work out of the box.
"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
"""For information on the constructor arguments, please see PyTorch's
documentation in torch.nn.AvgPool2d"""
super().__init__()
assert padding == 0
assert ceil_mode is False
assert count_include_pad is True
assert divisor_override is None
if stride is None:
stride = kernel_size
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
self._one_over_kernel_size = 1 / (self.kernel_size * self.kernel_size)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| brandonhee/PySyft | AvgPool2d | false | 6,357 | [
"Apache-2.0"
] | 1 | 31217f28aa3d996b2bb84477fb15a990f0cb9a80 | https://github.com/brandonhee/PySyft/tree/31217f28aa3d996b2bb84477fb15a990f0cb9a80 | from torch.nn import Module
import torch
import torch as th
class Model(Module):
"""
This class is the beginning of an exact Python port of the torch.nn.AvgPool2d
module. Because PySyft cannot hook into layers that are implemented in C++,
our special functionalities (such as encrypted computation) do not work with
torch.nn.AvgPool2d, so we must provide Python ports of every layer type we
seek to use.
Note that this module has been tested to ensure that it produces exactly the
same output values, in the same order, as the main module. However, there is
often some rounding error of unknown origin, usually less than 1e-6 in
magnitude.
This module has not yet been tested on GPUs but should work out of the box.
"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
"""For information on the constructor arguments, please see PyTorch's
documentation in torch.nn.AvgPool2d"""
super().__init__()
assert padding == 0
assert ceil_mode is False
assert count_include_pad is True
assert divisor_override is None
if stride is None:
stride = kernel_size
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
self._one_over_kernel_size = 1 / (self.kernel_size * self.kernel_size)
def forward(self, data):
batch_size, out_channels, rows, cols = data.shape
kernel_results = list()
for i in range(0, rows - self.kernel_size + 1, self.stride):
for j in range(0, cols - self.kernel_size + 1, self.stride):
kernel_out = data[:, :, i:i + self.kernel_size, j:j + self.
kernel_size].sum(2).sum(2) * self._one_over_kernel_size
kernel_results.append(kernel_out.unsqueeze(2))
pred = th.cat(kernel_results, axis=2).view(batch_size, out_channels,
int(rows / self.stride), int(cols / self.stride))
return pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jc/cjcmonojgt5syfl4gwrg64cs7sexr7chwsyoox5gualzchbfx2uu.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt
# Graph fragment:
# %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_2), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sx/csxmixchkiwaaxctdsbdahgg6l6kte2lpjzox5oikoelz6hlpwyr.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1040
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 260
x1 = (xindex // 260)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((256*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + ((256*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 260, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + ((4*x1) + ((-256) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/u2/cu2l6dx3mmsceog6k5g5ymaniat6gqe77ap35mcbeyh7afpuin2g.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => gt_1, mul_1, where_1
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yh/cyh4nxttmv2s545nye3lr4ggbk423yhruoum4fzb24b64iqxuimy.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_2, mul_2, where_2
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.01), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_tensor, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (256, 260), (260, 1))
assert_size_stride(primals_6, (256, ), (1, ))
assert_size_stride(primals_7, (128, 256), (256, 1))
assert_size_stride(primals_8, (128, ), (1, ))
assert_size_stride(primals_9, (1, 128), (128, 1))
assert_size_stride(primals_10, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, 1024, grid=grid(1024), stream=stream0)
buf2 = empty_strided_cuda((4, 260), (260, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf1, buf0, primals_2, primals_4, buf2, 1040, grid=grid(1040), stream=stream0)
del primals_2
del primals_4
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (260, 256), (1, 260), 0), out=buf3)
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf3, primals_6, buf4, buf5, 1024, grid=grid(1024), stream=stream0)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 128), (1, 256), 0), out=buf6)
buf7 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_3.run(buf6, primals_8, buf7, buf8, 512, grid=grid(512), stream=stream0)
del buf6
del primals_8
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, buf8, reinterpret_tensor(primals_9, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf10)
del primals_10
return (buf10, primals_3, buf1, buf2, buf4, buf5, buf7, buf8, primals_9, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, 260), (260, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_unit(layer):
inp = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(inp)
return -lim, lim
class Critic(nn.Module):
def __init__(self, state_size, action_size, seed=2, fc1_units=256,
fc2_units=256, fc3_units=128):
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, fc3_units)
self.fc4 = nn.Linear(fc3_units, 1)
self.reset_weights()
def reset_weights(self):
self.fc1.weight.data.uniform_(*hidden_unit(self.fc1))
self.fc2.weight.data.uniform_(*hidden_unit(self.fc2))
self.fc3.weight.data.uniform_(*hidden_unit(self.fc3))
self.fc4.weight.data.uniform_(-0.003, 0.003)
def forward(self, state, action):
x = F.leaky_relu(self.fc1(state))
x = torch.cat((x, action), dim=1)
x = F.leaky_relu(self.fc2(x))
x = F.leaky_relu(self.fc3(x))
return self.fc4(x)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
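# A minimal usage sketch (illustrative only): the critic scores each
# (state, action) pair in a batch with a single Q-value.
def q_values(critic, state, action):
    with torch.no_grad():
        return critic(state, action)  # shape: (batch, 1)

# e.g. q_values(Critic(4, 4), torch.rand(4, 4), torch.rand(4, 4))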
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
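    # This kernel fuses the LeakyReLU on fc1's pre-activation with the
    # torch.cat((x, action), dim=1): columns 0..255 of each output row hold
    # leaky_relu(state @ W1.T + b1), while columns 256..259 copy the action.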
xnumel = 1040
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 260
x1 = xindex // 260
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (256 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (256 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 260, tl.int64)
tmp17 = tl.load(in_ptr3 + (4 * x1 + (-256 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (256, 260), (260, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (128, 256), (256, 1))
assert_size_stride(primals_8, (128,), (1,))
assert_size_stride(primals_9, (1, 128), (128, 1))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 256),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(1024)](buf0, primals_2, buf1,
1024, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 260), (260, 1), torch.float32)
triton_poi_fused_cat_1[grid(1040)](buf1, buf0, primals_2, primals_4,
buf2, 1040, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf3 = buf0
del buf0
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (260, 256), (
1, 260), 0), out=buf3)
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(1024)](buf3, primals_6, buf4,
buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 128), (
1, 256), 0), out=buf6)
buf7 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
triton_poi_fused_leaky_relu_3[grid(512)](buf6, primals_8, buf7,
buf8, 512, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
del primals_8
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_10, buf8, reinterpret_tensor(primals_9,
(128, 1), (1, 128), 0), alpha=1, beta=1, out=buf10)
del primals_10
return (buf10, primals_3, buf1, buf2, buf4, buf5, buf7, buf8, primals_9,
primals_7, primals_5)
def hidden_unit(layer):
inp = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(inp)
return -lim, lim
class CriticNew(nn.Module):
def __init__(self, state_size, action_size, seed=2, fc1_units=256,
fc2_units=256, fc3_units=128):
super(CriticNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, fc3_units)
self.fc4 = nn.Linear(fc3_units, 1)
self.reset_weights()
def reset_weights(self):
self.fc1.weight.data.uniform_(*hidden_unit(self.fc1))
self.fc2.weight.data.uniform_(*hidden_unit(self.fc2))
self.fc3.weight.data.uniform_(*hidden_unit(self.fc3))
self.fc4.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0, input_1):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.fc3.weight
primals_8 = self.fc3.bias
primals_9 = self.fc4.weight
primals_10 = self.fc4.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| bnriiitb/Deep-Reinforcement-Learning | Critic | false | 6,358 | [
"MIT"
] | 1 | 5649a9d86fbec32fe3ac9cbb923d0d3a4c692d1e | https://github.com/bnriiitb/Deep-Reinforcement-Learning/tree/5649a9d86fbec32fe3ac9cbb923d0d3a4c692d1e | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_unit(layer):
inp = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(inp)
return -lim, lim
class Model(nn.Module):
def __init__(self, state_size, action_size, seed=2, fc1_units=256,
fc2_units=256, fc3_units=128):
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, fc3_units)
self.fc4 = nn.Linear(fc3_units, 1)
self.reset_weights()
def reset_weights(self):
self.fc1.weight.data.uniform_(*hidden_unit(self.fc1))
self.fc2.weight.data.uniform_(*hidden_unit(self.fc2))
self.fc3.weight.data.uniform_(*hidden_unit(self.fc3))
self.fc4.weight.data.uniform_(-0.003, 0.003)
def forward(self, state, action):
x = F.leaky_relu(self.fc1(state))
x = torch.cat((x, action), dim=1)
x = F.leaky_relu(self.fc2(x))
x = F.leaky_relu(self.fc3(x))
return self.fc4(x)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
SimpleModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/t2/ct2dbabladhyyceg2gmfqrslgo4edv7x6gs7iscumud7suileuje.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# cross_entropy => div, exp, log, mul, neg, sub_1, sum_1, sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_4), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
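    # (0.015625 == 1/64: CrossEntropyLoss' mean-reduction divisor over the
    # N*d1*d2 = 4*4*4 = 64 target positions)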
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_dim], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf3, buf1, primals_4, 1, 256, grid=grid(1), stream=stream0)
del buf1
return (buf3, primals_4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.cuda
from torch.nn.functional import *
class SimpleModel(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False, rank=0):
super(SimpleModel, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
if empty_grad:
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
self.rank = rank
self.empty_grad = empty_grad
def forward(self, x, y):
hidden_dim = x
if self.rank == 0 and self.empty_grad:
hidden_dim = self.linear(hidden_dim) + self.linear2(hidden_dim)
else:
hidden_dim = self.linear(hidden_dim)
return self.cross_entropy_loss(hidden_dim, y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4}]
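# Minimal sanity-check sketch (assumes CPU float32 inputs; `manual` below
# mirrors the fused -sum(log_softmax(x, dim=1) * y) / 64 reduction performed
# by the compiled kernels in this entry; CrossEntropyLoss takes the
# class-probabilities path because y is a float tensor shaped like the logits).
if __name__ == "__main__":
    model = SimpleModel(hidden_dim=4)
    x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    manual = -(torch.log_softmax(model.linear(x), dim=1) * y).sum() / 64
    assert torch.allclose(model(x, y), manual, atol=1e-6)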
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.cuda
from torch.nn.functional import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf3, buf1,
primals_4, 1, 256, num_warps=2, num_stages=1)
del buf1
    return buf3, primals_4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0
class SimpleModelNew(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False, rank=0):
super(SimpleModelNew, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
if empty_grad:
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
self.rank = rank
self.empty_grad = empty_grad
def forward(self, input_0, input_1):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| bratao/DeepSpeed | SimpleModel | false | 6,359 | [
"MIT"
] | 1 | c50d8955e942e5e26cf81835d59ec3f20ef8540d | https://github.com/bratao/DeepSpeed/tree/c50d8955e942e5e26cf81835d59ec3f20ef8540d | import torch
import torch.cuda
from torch.nn.functional import *
class Model(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False, rank=0):
super().__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
if empty_grad:
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
self.rank = rank
self.empty_grad = empty_grad
def forward(self, x, y):
hidden_dim = x
if self.rank == 0 and self.empty_grad:
hidden_dim = self.linear(hidden_dim) + self.linear2(hidden_dim)
else:
hidden_dim = self.linear(hidden_dim)
return self.cross_entropy_loss(hidden_dim, y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Mid_block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/4w/c4wimnb7vlxk5u7piuh5g7aw5vqpw73l2dqrk26tak7tuyghtgkf.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_4, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class Mid_block(nn.Module):
def __init__(self, chanIn, chanOut, ks=3, stride=1):
super().__init__()
self.conv1 = nn.Conv3d(chanIn, chanOut, ks, padding=1)
self.conv2 = nn.Conv3d(chanOut, chanOut, ks, padding=1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
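# With ks=3, padding=1 and the default stride of 1, each Conv3d preserves
# D/H/W, so the block maps (N, chanIn, D, H, W) -> (N, chanOut, D, H, W);
# the `stride` constructor argument is currently unused.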
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'chanIn': 4, 'chanOut': 4}]
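# Shape sketch (assumption: batched 5-D input, which is what Conv3d expects;
# the 4-D tensor from get_inputs() is treated as a single unbatched volume).
if __name__ == "__main__":
    blk = Mid_block(chanIn=4, chanOut=8)
    assert blk(torch.rand(2, 4, 4, 4, 4)).shape == (2, 8, 4, 4, 4)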
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3,
            (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1,
            stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1),
            transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
        buf2 = extern_kernels.convolution(reinterpret_tensor(buf1,
            (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_4,
            stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1),
            transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
    return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        primals_1, primals_4,
        reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1)
class Mid_blockNew(nn.Module):
def __init__(self, chanIn, chanOut, ks=3, stride=1):
super().__init__()
self.conv1 = nn.Conv3d(chanIn, chanOut, ks, padding=1)
self.conv2 = nn.Conv3d(chanOut, chanOut, ks, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| basharbme/3d_segmentation | Mid_block | false | 6,360 | [
"MIT"
] | 1 | efcd966f74ebb74614515c38930e820ea1c4744e | https://github.com/basharbme/3d_segmentation/tree/efcd966f74ebb74614515c38930e820ea1c4744e | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, chanIn, chanOut, ks=3, stride=1):
super().__init__()
self.conv1 = nn.Conv3d(chanIn, chanOut, ks, padding=1)
self.conv2 = nn.Conv3d(chanOut, chanOut, ks, padding=1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MaskedLinearSeqDup | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nj/cnjvzg7wxccqdqia57zwzape445i4tqqznreiti3jgzqz7udixwn.py
# Topologically Sorted Source Nodes: [dup], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# dup => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1.38), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.38
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
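# The kernel above computes dup = x * 1.38 directly into a fresh output
# buffer, so the clone().detach() in the eager code needs no separate copy.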
# kernel path: runs/run_shard_4/inductor_cache/mi/cmiiwxoah5ey2kqqnzudacqixy3ly5gbliq33toscp3owmkoccoy.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# out_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %primals_4), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
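# The kernel above fuses the Linear bias add (tmp0 + bias) with the mask
# multiply, updating the matmul output buffer in place.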
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dup], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf2, primals_3, primals_4, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf0, buf2, primals_4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.cuda
from torch.nn.functional import *
class MaskedLinear(torch.nn.Linear):
def forward(self, x, mask):
out = super().forward(x)
if mask.is_floating_point():
out = out * mask
else:
out = out * mask.type_as(out)
return out
class MaskedLinearSeq(MaskedLinear):
"""Tests pipeline modules by also returning the mask."""
def forward(self, x, mask):
return super().forward(x, mask), mask
class MaskedLinearSeqDup(MaskedLinearSeq):
"""MaskedLinearSeq, but with more outputs than inputs and in a different order."""
def forward(self, x, mask):
dup = x.clone().detach() * 1.38
x, mask = super().forward(x, mask)
return dup, x, mask
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
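# Minimal usage sketch (assumes CPU; checks the duplicated-output contract:
# dup is a detached copy of x scaled by 1.38 and the mask passes through).
if __name__ == "__main__":
    layer = MaskedLinearSeqDup(4, 4)
    x, mask = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    dup, out, m = layer(x, mask)
    assert torch.allclose(dup, x * 1.38) and m is mask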
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.cuda
from torch.nn.functional import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.38
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_mul_1[grid(256)](buf2, primals_3, primals_4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
    return buf0, buf2, primals_4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0)
class MaskedLinear(torch.nn.Linear):
def forward(self, x, mask):
out = super().forward(x)
if mask.is_floating_point():
out = out * mask
else:
out = out * mask.type_as(out)
return out
class MaskedLinearSeq(MaskedLinear):
"""Tests pipeline modules by also returning the mask."""
def forward(self, x, mask):
return super().forward(x, mask), mask
class MaskedLinearSeqDupNew(MaskedLinearSeq):
"""MaskedLinearSeq, but with more outputs than inputs and in a different order."""
def forward(self, input_0, input_1):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1], output[2]
| bratao/DeepSpeed | MaskedLinearSeqDup | false | 6,361 | [
"MIT"
] | 1 | c50d8955e942e5e26cf81835d59ec3f20ef8540d | https://github.com/bratao/DeepSpeed/tree/c50d8955e942e5e26cf81835d59ec3f20ef8540d | import torch
import torch.cuda
from torch.nn.functional import *
class MaskedLinear(torch.nn.Linear):
def forward(self, x, mask):
out = super().forward(x)
if mask.is_floating_point():
out = out * mask
else:
out = out * mask.type_as(out)
return out
class MaskedLinearSeq(MaskedLinear):
"""Tests pipeline modules by also returning the mask."""
def forward(self, x, mask):
return super().forward(x, mask), mask
class Model(MaskedLinearSeq):
"""MaskedLinearSeq, but with more outputs than inputs and in a different order."""
def forward(self, x, mask):
dup = x.clone().detach() * 1.38
x, mask = super().forward(x, mask)
return dup, x, mask
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MultiChannelCombinedScorer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nn/cnn2xob7rn3fx6qxfv2bhg4jsupl7tzhvinbqywlxmtzieg5auh3.py
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
# Source node to ATen node mapping:
# getitem => index
# Graph fragment:
# %index : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_1, [None, %full_default]), kwargs = {})
triton_poi_fused_index_0 = async_compile.triton('triton_poi_fused_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yt/cyt7p7f4dlczykxg27so55ce4rw5jnval5ey4qhjrhky64jgdpqr.py
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# output => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%index, %primals_2, %primals_3, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7q/c7qfog52ki6nxnbdejvrpfr2osgoaqkeut4gkxjvfzlmqmymffht.py
# Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# output_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/to/cto7jphpwb3liixmweivn3bu3rayxtlm4rku76zhbas4zeh4shu3.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# interpolate => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.25), kwargs = {})
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_3 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_3(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.25
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
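# The kernel above builds the nearest-neighbor source-index table for the
# 4-wide upsample: src = int(dst * 0.25), which is 0 for dst in 0..3, i.e.
# the 1x1 convolution output is broadcast across the 4x4 target grid.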
# kernel path: runs/run_shard_4/inductor_cache/6t/c6tl5uzryqoxj2kgigy3jdrfml3hupgqtrnoaykev2o5qoqkqnd2.py
# Topologically Sorted Source Nodes: [getitem_1], Original ATen: [aten.index]
# Source node to ATen node mapping:
# getitem_1 => index_1
# Graph fragment:
# %index_1 : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_1, [None, %full_default_1]), kwargs = {})
triton_poi_fused_index_4 = async_compile.triton('triton_poi_fused_index_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c2/cc2ovgc24vv5pkrhdyfonwhyasfgzhcylaowud3yw6avucbayxm6.py
# Topologically Sorted Source Nodes: [output_3, interpolate, output1, output_7, interpolate_1, output2, output3, output4, mul, mul_1, mul_2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# interpolate => _unsafe_index
# interpolate_1 => _unsafe_index_1
# mul => mul_8
# mul_1 => mul_9
# mul_2 => mul_10
# output1 => sigmoid
# output2 => sigmoid_1
# output3 => sigmoid_2
# output4 => sigmoid_3
# output_3 => convolution_3
# output_7 => convolution_7
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_3, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%_unsafe_index,), kwargs = {})
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_16, %primals_17, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%_unsafe_index_1,), kwargs = {})
# %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%index,), kwargs = {})
# %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%index_1,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sigmoid_1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_8, %sigmoid_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %sigmoid_3), kwargs = {})
triton_poi_fused__unsafe_index_convolution_mul_sigmoid_5 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_mul_sigmoid_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_mul_sigmoid_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_mul_sigmoid_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (0))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (0))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr5 + (x3), xmask)
tmp19 = tl.load(in_ptr6 + (x3), xmask)
tmp1 = tl.full([XBLOCK], 1, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp12 = tmp9 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tl.sigmoid(tmp17)
tmp20 = tl.sigmoid(tmp19)
tmp21 = tl.sigmoid(tmp12)
tmp22 = tl.sigmoid(tmp16)
tmp23 = tmp21 * tmp22
tmp24 = tmp23 * tmp18
tmp25 = tmp24 * tmp20
tl.store(out_ptr0 + (x3), tmp12, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
tl.store(out_ptr2 + (x3), tmp18, xmask)
tl.store(out_ptr3 + (x3), tmp20, xmask)
tl.store(out_ptr4 + (x3), tmp25, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (10, ), (1, ))
assert_size_stride(primals_4, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (10, ), (1, ))
assert_size_stride(primals_6, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_7, (10, ), (1, ))
assert_size_stride(primals_8, (1, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_11, (10, ), (1, ))
assert_size_stride(primals_12, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_13, (10, ), (1, ))
assert_size_stride(primals_14, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_15, (10, ), (1, ))
assert_size_stride(primals_16, (1, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_17, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
stream0 = get_raw_stream(0)
triton_poi_fused_index_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 10, 2, 2), (40, 4, 2, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 160, grid=grid(160), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 10, 1, 1), (10, 1, 1, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, 40, grid=grid(40), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 10, 1, 1), (10, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, output_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf6, primals_7, 40, grid=grid(40), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 1, 1), (1, 1, 1, 1))
buf8 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_3.run(buf8, 4, grid=grid(4), stream=stream0)
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [getitem_1], Original ATen: [aten.index]
triton_poi_fused_index_4.run(primals_1, buf10, 64, grid=grid(64), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 10, 2, 2), (40, 4, 2, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, output_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf12, primals_11, 160, grid=grid(160), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 10, 1, 1), (10, 1, 1, 1))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, output_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf14, primals_13, 40, grid=grid(40), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 10, 1, 1), (10, 1, 1, 1))
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, output_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf16, primals_15, 40, grid=grid(40), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [output_7], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, primals_16, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 1, 1, 1), (1, 1, 1, 1))
buf9 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf19 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_3, interpolate, output1, output_7, interpolate_1, output2, output3, output4, mul, mul_1, mul_2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sigmoid, aten.mul]
triton_poi_fused__unsafe_index_convolution_mul_sigmoid_5.run(buf8, buf7, primals_9, buf17, primals_17, buf0, buf10, buf9, buf18, buf19, buf20, buf21, 64, grid=grid(64), stream=stream0)
del buf17
del buf7
del primals_17
del primals_9
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf0, buf2, buf4, buf6, buf8, buf9, buf10, buf12, buf14, buf16, buf18, buf19, buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((10, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((10, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
class FociDetector(nn.Module):
def __init__(self, input_channels=3, input_size=17, ksize=5,
hidden_channels=10):
super(FociDetector, self).__init__()
self.conv1 = nn.Conv2d(input_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv2 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv3 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.finalmapsize = ksize
self.convf = nn.Conv2d(hidden_channels, 1, self.finalmapsize,
padding=int((ksize - 1) / 2))
self.relu = nn.ReLU()
def forward(self, x):
output = self.relu(self.conv1(x))
output = self.relu(self.conv2(output))
output = self.relu(self.conv3(output))
output = self.convf(output)
return output
class MultiChannelCombinedScorer(nn.Module):
def __init__(self, input_size=17, ksize=5, hidden_channels=10):
super(MultiChannelCombinedScorer, self).__init__()
self.channel1 = FociDetector(input_channels=1, input_size=
input_size, ksize=ksize, hidden_channels=hidden_channels)
self.channel2 = FociDetector(input_channels=1, input_size=
input_size, ksize=ksize, hidden_channels=hidden_channels)
def forward(self, x):
output1 = torch.sigmoid(F.interpolate(self.channel1(x[:, [0], :, :]
), size=(x.shape[2], x.shape[3])))
output2 = torch.sigmoid(F.interpolate(self.channel2(x[:, [1], :, :]
), size=(x.shape[2], x.shape[3])))
output3 = torch.sigmoid(x[:, [0], :, :])
output4 = torch.sigmoid(x[:, [1], :, :])
return output1 * output2 * output3 * output4
def forward_vis(self, x):
output1 = torch.sigmoid(F.interpolate(self.channel1(x[:, [0], :, :]
), size=(x.shape[2], x.shape[3])))
output2 = torch.sigmoid(F.interpolate(self.channel2(x[:, [1], :, :]
), size=(x.shape[2], x.shape[3])))
output3 = torch.sigmoid(x[:, [0], :, :])
output4 = torch.sigmoid(x[:, [1], :, :])
return output1, output2, output3, output4
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
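# Minimal usage sketch, assuming the toy shapes from get_inputs() above; it only
# restates what forward()/forward_vis() already do:
def _example_usage():
    model = MultiChannelCombinedScorer()
    x = torch.rand(4, 4, 4, 4)
    o1, o2, o3, o4 = model.forward_vis(x)
    score = model(x)  # elementwise o1 * o2 * o3 * o4, shape (4, 1, 4, 4)
    return score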
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 10
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.25
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
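# This kernel precomputes the nearest-neighbour source indices used by
# F.interpolate, roughly (torch.arange(4) * 0.25).to(torch.int64); with a 1x1
# source map every output position maps back to index 0.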
@triton.jit
def triton_poi_fused_index_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
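# The two index kernels materialize the channel slices of the (4, 4, 4, 4)
# input: offset 0 reads x[:, [0], :, :] and offset 16 (one channel stride of
# H * W = 16 elements) reads x[:, [1], :, :], the per-channel detector inputs.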
@triton.jit
def triton_poi_fused__unsafe_index_convolution_mul_sigmoid_5(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + 0)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr5 + x3, xmask)
tmp19 = tl.load(in_ptr6 + x3, xmask)
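    # The clamp below (wrap negative indices, as aten._unsafe_index would) is
    # kept from the fused graph but its results are discarded: the gathered
    # maps are 1x1, so every spatial index resolves to 0 and the scalar loads
    # above already use x2 alone.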
tmp1 = tl.full([XBLOCK], 1, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tl.where(tmp7, tmp6, tmp5)
tmp12 = tmp9 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tl.sigmoid(tmp17)
tmp20 = tl.sigmoid(tmp19)
tmp21 = tl.sigmoid(tmp12)
tmp22 = tl.sigmoid(tmp16)
tmp23 = tmp21 * tmp22
tmp24 = tmp23 * tmp18
tmp25 = tmp24 * tmp20
tl.store(out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
tl.store(out_ptr2 + x3, tmp18, xmask)
tl.store(out_ptr3 + x3, tmp20, xmask)
tl.store(out_ptr4 + x3, tmp25, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (10,), (1,))
assert_size_stride(primals_4, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (10,), (1,))
assert_size_stride(primals_6, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_7, (10,), (1,))
assert_size_stride(primals_8, (1, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_11, (10,), (1,))
assert_size_stride(primals_12, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_13, (10,), (1,))
assert_size_stride(primals_14, (10, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_15, (10,), (1,))
assert_size_stride(primals_16, (1, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_17, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 10, 2, 2), (40, 4, 2, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(160)](buf2, primals_3, 160,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 10, 1, 1), (10, 1, 1, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_2[grid(40)](buf4, primals_5, 40,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 10, 1, 1), (10, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_2[grid(40)](buf6, primals_7, 40,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 1, 1), (1, 1, 1, 1))
buf8 = empty_strided_cuda((4,), (1,), torch.int64)
    triton_poi_fused__to_copy_add_arange_mul_3[grid(4)](buf8, 4, XBLOCK=4,
        num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
triton_poi_fused_index_4[grid(64)](primals_1, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 10, 2, 2), (40, 4, 2, 1))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_1[grid(160)](buf12, primals_11,
160, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 10, 1, 1), (10, 1, 1, 1))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_2[grid(40)](buf14, primals_13, 40,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 10, 1, 1), (10, 1, 1, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_relu_2[grid(40)](buf16, primals_15, 40,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_15
buf17 = extern_kernels.convolution(buf16, primals_16, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 1, 1, 1), (1, 1, 1, 1))
buf9 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf19 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
    triton_poi_fused__unsafe_index_convolution_mul_sigmoid_5[grid(64)](
        buf8, buf7, primals_9, buf17, primals_17, buf0, buf10, buf9, buf18,
        buf19, buf20, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf17
del buf7
del primals_17
del primals_9
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, primals_14, primals_16, buf0, buf2, buf4, buf6, buf8,
buf9, buf10, buf12, buf14, buf16, buf18, buf19, buf20)
class FociDetector(nn.Module):
def __init__(self, input_channels=3, input_size=17, ksize=5,
hidden_channels=10):
super(FociDetector, self).__init__()
self.conv1 = nn.Conv2d(input_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv2 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv3 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.finalmapsize = ksize
self.convf = nn.Conv2d(hidden_channels, 1, self.finalmapsize,
padding=int((ksize - 1) / 2))
self.relu = nn.ReLU()
def forward(self, x):
output = self.relu(self.conv1(x))
output = self.relu(self.conv2(output))
output = self.relu(self.conv3(output))
output = self.convf(output)
return output
class MultiChannelCombinedScorerNew(nn.Module):
def __init__(self, input_size=17, ksize=5, hidden_channels=10):
super(MultiChannelCombinedScorerNew, self).__init__()
self.channel1 = FociDetector(input_channels=1, input_size=
input_size, ksize=ksize, hidden_channels=hidden_channels)
self.channel2 = FociDetector(input_channels=1, input_size=
input_size, ksize=ksize, hidden_channels=hidden_channels)
def forward_vis(self, x):
output1 = torch.sigmoid(F.interpolate(self.channel1(x[:, [0], :, :]
), size=(x.shape[2], x.shape[3])))
output2 = torch.sigmoid(F.interpolate(self.channel2(x[:, [1], :, :]
), size=(x.shape[2], x.shape[3])))
output3 = torch.sigmoid(x[:, [0], :, :])
output4 = torch.sigmoid(x[:, [1], :, :])
return output1, output2, output3, output4
def forward(self, input_0):
primals_2 = self.channel1.conv1.weight
primals_3 = self.channel1.conv1.bias
primals_4 = self.channel1.conv2.weight
primals_5 = self.channel1.conv2.bias
primals_6 = self.channel1.conv3.weight
primals_7 = self.channel1.conv3.bias
primals_8 = self.channel1.convf.weight
primals_9 = self.channel1.convf.bias
primals_10 = self.channel2.conv1.weight
primals_11 = self.channel2.conv1.bias
primals_12 = self.channel2.conv2.weight
primals_13 = self.channel2.conv2.bias
primals_14 = self.channel2.conv3.weight
primals_15 = self.channel2.conv3.bias
primals_16 = self.channel2.convf.weight
primals_17 = self.channel2.convf.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
| bharath272/centrosome-analysis | MultiChannelCombinedScorer | false | 6,362 | [
"MIT"
] | 1 | 6ae3744be464812b3767909420d7b78cea9da670 | https://github.com/bharath272/centrosome-analysis/tree/6ae3744be464812b3767909420d7b78cea9da670 | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
class FociDetector(nn.Module):
def __init__(self, input_channels=3, input_size=17, ksize=5,
hidden_channels=10):
super().__init__()
self.conv1 = nn.Conv2d(input_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv2 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.conv3 = nn.Conv2d(hidden_channels, hidden_channels, ksize,
stride=2, padding=int((ksize - 1) / 2))
self.finalmapsize = ksize
self.convf = nn.Conv2d(hidden_channels, 1, self.finalmapsize,
padding=int((ksize - 1) / 2))
self.relu = nn.ReLU()
def forward(self, x):
output = self.relu(self.conv1(x))
output = self.relu(self.conv2(output))
output = self.relu(self.conv3(output))
output = self.convf(output)
return output
class Model(nn.Module):
def __init__(self, input_size=17, ksize=5, hidden_channels=10):
super().__init__()
self.channel1 = FociDetector(input_channels=1, input_size=
input_size, ksize=ksize, hidden_channels=hidden_channels)
self.channel2 = FociDetector(input_channels=1, input_size=
input_size, ksize=ksize, hidden_channels=hidden_channels)
def forward(self, x):
output1 = torch.sigmoid(F.interpolate(self.channel1(x[:, [0], :, :]
), size=(x.shape[2], x.shape[3])))
output2 = torch.sigmoid(F.interpolate(self.channel2(x[:, [1], :, :]
), size=(x.shape[2], x.shape[3])))
output3 = torch.sigmoid(x[:, [0], :, :])
output4 = torch.sigmoid(x[:, [1], :, :])
return output1 * output2 * output3 * output4
def forward_vis(self, x):
output1 = torch.sigmoid(F.interpolate(self.channel1(x[:, [0], :, :]
), size=(x.shape[2], x.shape[3])))
output2 = torch.sigmoid(F.interpolate(self.channel2(x[:, [1], :, :]
), size=(x.shape[2], x.shape[3])))
output3 = torch.sigmoid(x[:, [0], :, :])
output4 = torch.sigmoid(x[:, [1], :, :])
return output1, output2, output3, output4
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SmoothBCEwLogits | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lk/clkucjdow3uaxprosshl3chftjoqkfj653zmoyf4qvo2pyac4ss6.py
# Topologically Sorted Source Nodes: [mul, targets, loss, loss_1], Original ATen: [aten.mul, aten.add, aten.binary_cross_entropy_with_logits, aten.mean]
# Source node to ATen node mapping:
# loss => abs_1, exp, full_default, log1p, mean, minimum, mul_1, neg, sub, sub_1, sub_2
# loss_1 => mean_1
# mul => mul
# targets => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mean,), kwargs = {})
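# The graph above is the numerically stable BCE-with-logits form; per element,
#   loss = (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
#        = max(x, 0) - t * x + log(1 + exp(-|x|)),
# which avoids exp overflow for any logit x, then it is averaged over all 256
# elements (the second mean over the resulting scalar is a no-op).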
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp5 = tmp1 - tmp4
tmp7 = tmp5 * tmp6
tmp8 = triton_helpers.minimum(tmp3, tmp6)
tmp9 = tl_math.abs(tmp6)
tmp10 = -tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = libdevice.log1p(tmp11)
tmp13 = tmp8 - tmp12
tmp14 = tmp7 - tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = tmp19 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
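# Note: this is a persistent reduction: a single program (XBLOCK=1, RBLOCK=256)
# loads all 256 elements, evaluates the stable BCE term, and reduces with one
# tl.sum, so the whole loss is produced in a single kernel launch.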
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, targets, loss, loss_1], Original ATen: [aten.mul, aten.add, aten.binary_cross_entropy_with_logits, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
class SmoothBCEwLogits(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0,
pos_weight=None):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
self.pos_weight = pos_weight
@staticmethod
def _smooth(targets: 'torch.Tensor', n_labels: 'int', smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, inputs, targets):
targets = SmoothBCEwLogits._smooth(targets, inputs.size(-1) + 1e-06,
self.smoothing)
loss = F.binary_cross_entropy_with_logits(inputs, targets, self.
weight, pos_weight=self.pos_weight)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
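# Minimal smoothing sketch (note _smooth ignores its n_labels argument): labels
# t become t * (1 - s) + 0.5 * s, pulling hard 0/1 targets toward 0.5.
def _smoothing_example():
    targets = torch.tensor([0.0, 1.0])
    smoothed = SmoothBCEwLogits._smooth(targets, n_labels=2, smoothing=0.1)
    return smoothed  # tensor([0.0500, 0.9500])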
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
from torch.nn.modules.loss import _WeightedLoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp5 = tmp1 - tmp4
tmp7 = tmp5 * tmp6
tmp8 = triton_helpers.minimum(tmp3, tmp6)
tmp9 = tl_math.abs(tmp6)
tmp10 = -tmp9
tmp11 = tl_math.exp(tmp10)
tmp12 = libdevice.log1p(tmp11)
tmp13 = tmp8 - tmp12
tmp14 = tmp7 - tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = tmp19 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
    triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_0[grid(1)](
        buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SmoothBCEwLogitsNew(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0,
pos_weight=None):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
self.pos_weight = pos_weight
@staticmethod
def _smooth(targets: 'torch.Tensor', n_labels: 'int', smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| broadinstitute/lincs-profiling-comparison | SmoothBCEwLogits | false | 6,363 | [
"BSD-3-Clause"
] | 1 | 075c3bc60eeb3934fc42c30bae6aeed8cda1cd6d | https://github.com/broadinstitute/lincs-profiling-comparison/tree/075c3bc60eeb3934fc42c30bae6aeed8cda1cd6d | import torch
import torch.utils.data
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
class Model(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0,
pos_weight=None):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
self.pos_weight = pos_weight
@staticmethod
def _smooth(targets: 'torch.Tensor', n_labels: 'int', smoothing=0.0):
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, inputs, targets):
targets = SmoothBCEwLogits._smooth(targets, inputs.size(-1) + 1e-06,
self.smoothing)
loss = F.binary_cross_entropy_with_logits(inputs, targets, self.
weight, pos_weight=self.pos_weight)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/d4/cd4cz72ypskbhnv4a7gnddefzi57culjgwetyckhq2jhh64z6rtn.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dq/cdqniiyy2bh7exxjlmeiulcbmupg7rxyslwm7crl3ajwwld6usn2.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2b/c2bujjyeji7nhf4gfgxav4unhmpugynzwx2v63uhk7lp4nn5exsa.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
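# The three ReLU kernels above differ only in problem size: each fuses the bias
# add and the ReLU in place on the GEMM output (in_out_ptr0), so every Linear
# layer costs one mm plus a single elementwise kernel.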
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024, ), (1, ))
assert_size_stride(primals_4, (512, 1024), (1024, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (256, 512), (512, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (4, 256), (256, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024), (1, 4), 0), out=buf0)
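        # reinterpret_tensor swaps the (1024, 4) weight's strides to (1, 4),
        # viewing it as its transpose without a copy, so mm computes x @ W.T
        # exactly as nn.Linear does.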
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 4096, grid=grid(4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1024, 512), (1, 1024), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 2048, grid=grid(2048), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (512, 256), (1, 512), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_9
return (buf6, primals_1, buf1, buf3, buf5, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, input_seq_length, output_num_classes):
"""Initialize model layers"""
super(Net, self).__init__()
self.input_seq_length = input_seq_length
self.output_num_classes = output_num_classes
self.fc1 = nn.Linear(self.input_seq_length, 1024)
self.fc2 = nn.Linear(1024, 512)
self.fc3 = nn.Linear(512, 256)
self.fc4 = nn.Linear(256, output_num_classes)
self.dropout = nn.Dropout(p=0.5, inplace=False)
def forward(self, x):
"""Forward pass through the model"""
x = x.view(x.shape[0], self.input_seq_length)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
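# Minimal usage sketch, assuming the toy sizes from get_init_inputs() below:
def _net_example():
    net = Net(input_seq_length=4, output_num_classes=4)
    logits = net(torch.rand(4, 4))  # (batch, num_classes) == (4, 4)
    return logits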
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_seq_length': 4, 'output_num_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1024, 4), (4, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (512, 1024), (1024, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (256, 512), (512, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (4, 256), (256, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024),
            (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
        triton_poi_fused_relu_0[grid(4096)](buf1, primals_3, 4096,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1024, 512),
(1, 1024), 0), out=buf2)
buf3 = buf2
del buf2
        triton_poi_fused_relu_1[grid(2048)](buf3, primals_5, 2048,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (512, 256),
            (1, 512), 0), out=buf4)
buf5 = buf4
del buf4
        triton_poi_fused_relu_2[grid(1024)](buf5, primals_7, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(256, 4), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_9
return buf6, primals_1, buf1, buf3, buf5, primals_8, primals_6, primals_4
class NetNew(nn.Module):
def __init__(self, input_seq_length, output_num_classes):
"""Initialize model layers"""
super(NetNew, self).__init__()
self.input_seq_length = input_seq_length
self.output_num_classes = output_num_classes
self.fc1 = nn.Linear(self.input_seq_length, 1024)
self.fc2 = nn.Linear(1024, 512)
self.fc3 = nn.Linear(512, 256)
self.fc4 = nn.Linear(256, output_num_classes)
self.dropout = nn.Dropout(p=0.5, inplace=False)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| bradford415/multiclassification | Net | false | 6,364 | [
"MIT"
] | 1 | ee0234ec0a85b04f78cd86c3e5c52e5d658f19ac | https://github.com/bradford415/multiclassification/tree/ee0234ec0a85b04f78cd86c3e5c52e5d658f19ac | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_seq_length, output_num_classes):
"""Initialize model layers"""
super().__init__()
self.input_seq_length = input_seq_length
self.output_num_classes = output_num_classes
self.fc1 = nn.Linear(self.input_seq_length, 1024)
self.fc2 = nn.Linear(1024, 512)
self.fc3 = nn.Linear(512, 256)
self.fc4 = nn.Linear(256, output_num_classes)
self.dropout = nn.Dropout(p=0.5, inplace=False)
def forward(self, x):
"""Forward pass through the model"""
x = x.view(x.shape[0], self.input_seq_length)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
L2loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jh/cjhmorbdlv6z74opl7hgkfvksiemvvmsjf5pztsmoepd5k6tw7r4.py
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div]
# Source node to ATen node mapping:
# loss => div
# pow_1 => pow_1
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_per_fused_div_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
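# Hedged eager reference for the fused kernel above (an editorial sketch drawn
# from the graph fragment, not part of the generated file): sub -> pow -> sum
# -> div, where the hard-coded 0.25 is 1 / y.shape[0] for the fixed
# (4, 4, 4, 4) inputs.
def _l2_reference(y, yhat):
    return (y - yhat).pow(2).sum() * 0.25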
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, sum_1, loss], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class L2loss(torch.nn.Module):
def __init__(self):
super(L2loss, self).__init__()
def forward(self, y, yhat):
loss = (y - yhat).pow(2).sum() / y.shape[0]
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L2lossNew(torch.nn.Module):
def __init__(self):
super(L2lossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| btolooshams/densae | L2loss | false | 6,365 | [
"MIT"
] | 1 | a1e4c4cc1b4be0386d42136f2695615ea3cf4815 | https://github.com/btolooshams/densae/tree/a1e4c4cc1b4be0386d42136f2695615ea3cf4815 | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y, yhat):
loss = (y - yhat).pow(2).sum() / y.shape[0]
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FeedForwardActorNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py
# Topologically Sorted Source Nodes: [activation1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# activation1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
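# Hedged eager equivalent of the fused kernel above (editorial sketch, not
# part of the generated file): add the per-feature bias, apply ReLU, and
# record the <= 0 mask that threshold_backward reuses.
def _relu_reference(x, bias):
    out = torch.relu(x + bias)
    return out, out <= 0.0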
# kernel path: runs/run_shard_4/inductor_cache/j4/cj4miacghwuwo6tmp3hylr7yjqyun32g4pisr65oc2dtlcxfwv2f.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# output => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uy/cuylqrd7ye33ogvvpsnxb3skali4boxth4tryw5hn4czjzyh4a34.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# output => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
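# Hedged two-step reference for the pair of softmax kernels above (editorial
# sketch): kernel 1 subtracts the per-column max over dim=0 and exponentiates,
# kernel 2 normalizes by the per-column sum; together they reproduce
# torch.softmax(x, dim=0) for a (4, 4, 4, 4) input viewed as (4, 64).
def _softmax_dim0_reference(x):
    e = (x - x.amax(dim=0, keepdim=True)).exp()
    return e / e.sum(dim=0, keepdim=True)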
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [activation1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf8, 4096, grid=grid(4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [activation2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 4096, grid=grid(4096), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
return (buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 64), (64, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class FeedForwardActorNN(nn.Module):
def __init__(self, in_dim, out_dim, is_discrete):
super(FeedForwardActorNN, self).__init__()
self.layer1 = nn.Linear(in_dim, 64)
self.layer2 = nn.Linear(64, 64)
self.layer3 = nn.Linear(64, out_dim)
self.is_discrete = is_discrete
def forward(self, obs):
if isinstance(obs, np.ndarray):
obs = torch.tensor(obs, dtype=torch.float)
activation1 = F.relu(self.layer1(obs))
activation2 = F.relu(self.layer2(activation1))
if self.is_discrete:
output = torch.softmax(self.layer3(activation2), dim=0)
else:
output = self.layer3(activation2)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'is_discrete': 4}]
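# Editorial note (hedged): torch.softmax(..., dim=0) above normalizes over the
# leading (batch-like) dimension, which is why the compiled kernels reduce
# over offsets 0/64/128/192; e.g. torch.softmax(t, dim=0).sum(dim=0) is all
# ones. get_init_inputs passes 4 for is_discrete; any truthy value selects
# the softmax branch.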
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 64), (64, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_3, buf8, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3,
primals_5, buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 64), (64, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class FeedForwardActorNNNew(nn.Module):
def __init__(self, in_dim, out_dim, is_discrete):
super(FeedForwardActorNNNew, self).__init__()
self.layer1 = nn.Linear(in_dim, 64)
self.layer2 = nn.Linear(64, 64)
self.layer3 = nn.Linear(64, out_dim)
self.is_discrete = is_discrete
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_6 = self.layer3.weight
primals_7 = self.layer3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| britig/policy-refinement-bo | FeedForwardActorNN | false | 6,366 | [
"MIT"
] | 1 | c8a1e347d6e27c991e945afae9b5d9b482806f4b | https://github.com/britig/policy-refinement-bo/tree/c8a1e347d6e27c991e945afae9b5d9b482806f4b | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_dim, out_dim, is_discrete):
super().__init__()
self.layer1 = nn.Linear(in_dim, 64)
self.layer2 = nn.Linear(64, 64)
self.layer3 = nn.Linear(64, out_dim)
self.is_discrete = is_discrete
def forward(self, obs):
if isinstance(obs, np.ndarray):
obs = torch.tensor(obs, dtype=torch.float)
activation1 = F.relu(self.layer1(obs))
activation2 = F.relu(self.layer2(activation1))
if self.is_discrete:
output = torch.softmax(self.layer3(activation2), dim=0)
else:
output = self.layer3(activation2)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Disc | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# input_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
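# Hedged reading of the index math above (editorial sketch): x0 = i % 8 is the
# concatenated feature slot; slots 0-3 read from in_ptr0 and slots 4-7 from
# in_ptr1, i.e. a last-dim concatenation of two (..., 4) inputs.
def _cat_reference(x, y):
    return torch.cat((x, y), dim=-1)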
# kernel path: runs/run_shard_4/inductor_cache/5n/c5nnm5j7zky5bxmyowjq4lc7gdpqhzs7nzbrb3occhq5mr35r7m6.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# hidden => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
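# Hedged eager equivalent (editorial sketch): bias add followed by LeakyReLU
# with the default 0.01 slope, also emitting the > 0 mask kept for backward
# (the kernel stores the mask to out_ptr0 and the activation to out_ptr1).
def _leaky_relu_reference(x, bias):
    z = x + bias
    return z > 0.0, torch.nn.functional.leaky_relu(z, negative_slope=0.01)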
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf1, primals_4, buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del primals_4
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_6
return (reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super(MLP, self).__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
class Disc(nn.Module):
"""
2-layer discriminator for MI estimator
:param x_dim: int, size of x vector
:param y_dim: int, size of y vector
:param dropout: float, dropout rate
"""
def __init__(self, x_dim, y_dim, dropout):
super(Disc, self).__init__()
self.disc = MLP(x_dim + y_dim, 1, y_dim, dropout, n_layers=2)
return
def forward(self, x, y):
"""
:param x: Tensor of (batch_size, hidden_dim), x
:param y: Tensor of (batch_size, hidden_dim), y
:returns: Tensor of (batch_size), score
"""
input = torch.cat((x, y), dim=-1)
score = self.disc(input).squeeze(-1)
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'x_dim': 4, 'y_dim': 4, 'dropout': 0.5}]
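# Hedged usage sketch (editorial, not from the source repo): score a batch of
# (x, y) pairs with the eager module.
#   disc = Disc(x_dim=4, y_dim=4, dropout=0.5)
#   score = disc(torch.rand(8, 4), torch.rand(8, 4))  # -> shape (8,)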
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(256)](buf1, primals_4, buf2,
buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_4
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_6
return reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_5
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super(MLP, self).__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
class DiscNew(nn.Module):
"""
2-layer discriminator for MI estimator
:param x_dim: int, size of x vector
:param y_dim: int, size of y vector
:param dropout: float, dropout rate
"""
def __init__(self, x_dim, y_dim, dropout):
super(DiscNew, self).__init__()
self.disc = MLP(x_dim + y_dim, 1, y_dim, dropout, n_layers=2)
return
def forward(self, input_0, input_1):
primals_3 = self.disc.l_in.weight
primals_4 = self.disc.l_in.bias
primals_5 = self.disc.l_out.weight
primals_6 = self.disc.l_out.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| bigdata-ustc/DisenQNet | Disc | false | 6,367 | [
"MIT"
] | 1 | 908fadeb9b8d278450213deff70205703bd91da6 | https://github.com/bigdata-ustc/DisenQNet/tree/908fadeb9b8d278450213deff70205703bd91da6 | import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
"""
Multi-Layer Perceptron
:param in_dim: int, size of input feature
:param n_classes: int, number of output classes
:param hidden_dim: int, size of hidden vector
:param dropout: float, dropout rate
:param n_layers: int, number of layers, at least 2, default = 2
:param act: function, activation function, default = leaky_relu
"""
def __init__(self, in_dim, n_classes, hidden_dim, dropout, n_layers=2,
act=F.leaky_relu):
super().__init__()
self.l_in = nn.Linear(in_dim, hidden_dim)
self.l_hs = nn.ModuleList(nn.Linear(hidden_dim, hidden_dim) for _ in
range(n_layers - 2))
self.l_out = nn.Linear(hidden_dim, n_classes)
self.dropout = nn.Dropout(p=dropout)
self.act = act
return
def forward(self, input):
"""
:param input: Tensor of (batch_size, in_dim), input feature
:returns: Tensor of (batch_size, n_classes), output class
"""
hidden = self.act(self.l_in(self.dropout(input)))
for l_h in self.l_hs:
hidden = self.act(l_h(self.dropout(hidden)))
output = self.l_out(self.dropout(hidden))
return output
class Model(nn.Module):
"""
2-layer discriminator for MI estimator
:param x_dim: int, size of x vector
:param y_dim: int, size of y vector
:param dropout: float, dropout rate
"""
def __init__(self, x_dim, y_dim, dropout):
super().__init__()
self.disc = MLP(x_dim + y_dim, 1, y_dim, dropout, n_layers=2)
return
def forward(self, x, y):
"""
:param x: Tensor of (batch_size, hidden_dim), x
:param y: Tensor of (batch_size, hidden_dim), y
:returns: Tensor of (batch_size), score
"""
input = torch.cat((x, y), dim=-1)
score = self.disc(input).squeeze(-1)
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5]
|
MultiheadAttentionWrapper | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/m5/cm5f2v4luzyczo3xcytmjg2h6fymvgarwaaqswwrj34olun37r6i.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %_scaled_dot_product_efficient_attention_default : [num_users=1] = call_function[target=torch.ops.aten._scaled_dot_product_efficient_attention.default](args = (%unsqueeze_default, %unsqueeze_default_1, %unsqueeze_default_2, None, False), kwargs = {scale: 1.0})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1)), tmp0, xmask)
''', device_str='cuda')
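# Hedged reading of the copy above (inferred from the index math; values are
# unchanged): the kernel re-materializes a (heads, seq, dim) tensor into the
# (1, heads, seq, dim) buffer with strides (64, 4, 16, 1) that the
# efficient-attention op requests. An eager equivalent:
def _layout_reference(x):
    return x.unsqueeze(0).permute(0, 2, 1, 3).contiguous().permute(0, 2, 1, 3)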
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(arg1_1, buf1, 64, grid=grid(64), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(arg2_1, buf2, 64, grid=grid(64), stream=stream0)
del arg2_1
# Topologically Sorted Source Nodes: [], Original ATen: []
buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default(buf0, buf1, buf2, None, False, scale=1.0)
del buf0
del buf1
del buf2
buf4 = buf3[0]
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4), (4, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from torch.optim.lr_scheduler import *
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
def linear(x):
return x
def activation(func_a):
"""Activation function wrapper
"""
try:
f = eval(func_a)
except:
f = linear
return f
class DropoutWrapper(nn.Module):
"""
    This is a dropout wrapper which supports fixed-mask (variational) dropout
"""
def __init__(self, dropout_p=0, enable_vbp=True):
super(DropoutWrapper, self).__init__()
"""variational dropout means fix dropout mask
ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
"""
self.enable_variational_dropout = enable_vbp
self.dropout_p = dropout_p
def forward(self, x):
"""
:param x: batch * len * input_size
"""
if self.training is False or self.dropout_p == 0:
return x
if len(x.size()) == 3:
mask = 1.0 / (1 - self.dropout_p) * torch.bernoulli((1 - self.
dropout_p) * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
mask.requires_grad = False
return mask.unsqueeze(1).expand_as(x) * x
else:
return F.dropout(x, p=self.dropout_p, training=self.training)
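# Editorial illustration (hedged): for 3-D input one Bernoulli mask of shape
# (batch, input_size) is drawn and broadcast over the length dimension, so the
# same feature positions are zeroed in x[:, t, :] for every time step t; this
# is the "fixed mask" property referenced above.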
class MultiheadAttentionWrapper(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, query_dim, key_dim, value_dim, prefix='attention',
opt={}, dropout=None):
super().__init__()
self.prefix = prefix
self.num_heads = opt.get('{}_head'.format(self.prefix), 1)
self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self.
prefix), 0)) if dropout is None else dropout
self.qkv_dim = [query_dim, key_dim, value_dim]
        assert query_dim == key_dim, 'query dim must equal key dim'
self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
self.proj_on = opt.get('{}_proj_on'.format(prefix), False)
self.share = opt.get('{}_share'.format(self.prefix), False)
self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
if self.proj_on:
self.proj_modules = nn.ModuleList([nn.Linear(dim, self.
hidden_size) for dim in self.qkv_dim[0:2]])
if self.layer_norm_on:
                for i, proj in enumerate(self.proj_modules):
                    # rebinding the loop variable would be a no-op; write the
                    # wrapped module back into the ModuleList instead
                    self.proj_modules[i] = weight_norm(proj)
if self.share and self.qkv_dim[0] == self.qkv_dim[1]:
self.proj_modules[1] = self.proj_modules[0]
self.f = activation(opt.get('{}_activation'.format(self.prefix),
'relu'))
self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3
self.qkv_head_dim[2] = value_dim // self.num_heads
assert self.qkv_head_dim[0
] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads'
assert self.qkv_head_dim[2
] * self.num_heads == value_dim, 'value size must be divisible by num_heads'
else:
self.qkv_head_dim = [(emb // self.num_heads) for emb in self.
qkv_dim]
assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0
], 'query size must be divisible by num_heads'
assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1
], 'key size must be divisible by num_heads'
assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2
], 'value size must be divisible by num_heads'
if self.scale_on:
self.scaling = self.qkv_head_dim[0] ** -0.5
self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix),
False)
self.output_size = self.qkv_dim[2]
def forward(self, query, key, value, key_padding_mask=None):
query = query.transpose(0, 1)
key = key.transpose(0, 1)
value = value.transpose(0, 1)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.qkv_dim[0]
q, k, v = query, key, value
if self.proj_on:
if self.dropout:
q, k = self.dropout(q), self.dropout(k)
q, k = [self.f(proj(input)) for input, proj in zip([query, key],
self.proj_modules)]
src_len = k.size(0)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.scale_on:
q *= self.scaling
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.
qkv_head_dim[0]).transpose(0, 1)
k = k.contiguous().view(src_len, bsz * self.num_heads, self.
qkv_head_dim[1]).transpose(0, 1)
v = v.contiguous().view(src_len, bsz * self.num_heads, self.
qkv_head_dim[2]).transpose(0, 1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.float().masked_fill(key_padding_mask
.unsqueeze(1).unsqueeze(2), float('-inf')).type_as(attn_weights
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
if self.drop_diagonal:
assert attn_weights.size(1) == attn_weights.size(2)
diag_mask = torch.diag(attn_weights.data.new(attn_weights.size(
1)).zero_() + 1).byte().unsqueeze(0).expand_as(attn_weights)
attn_weights.data.masked_fill_(diag_mask, -float('inf'))
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = self.dropout(attn_weights)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
qkv_head_dim[2]]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, -1)
attn = attn.transpose(0, 1)
return attn
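# Added usage sketch (not from the original repo; assumes the class name
# MultiheadAttentionWrapper used elsewhere in this record): with the default
# opt (one head, projection off, scaling off), the wrapper is plain soft
# attention over batch-first tensors, and the output keeps the query's
# (batch, tgt_len) shape with value_dim features.
def _example_attention_usage():
    attn = MultiheadAttentionWrapper(query_dim=4, key_dim=4, value_dim=4)
    q, k, v = torch.rand(2, 5, 4), torch.rand(2, 6, 4), torch.rand(2, 6, 4)
    out = attn(q, k, v)
    assert out.shape == (2, 5, 4)
    return out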
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'query_dim': 4, 'key_dim': 4, 'value_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from torch.optim.lr_scheduler import *
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1), tmp0, xmask)
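# Added note (hedged): this copy kernel permutes each (batch, seq, dim) input
# into the strided (1, batch*heads, seq, head_dim) layout that the fused
# _scaled_dot_product_efficient_attention call below expects (heads=1 here).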
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
triton_poi_fused_0[grid(64)](arg1_1, buf1, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((1, 4, 4, 4), (64, 4, 16, 1), torch.float32)
triton_poi_fused_0[grid(64)](arg2_1, buf2, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del arg2_1
buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default(
buf0, buf1, buf2, None, False, scale=1.0)
del buf0
del buf1
del buf2
buf4 = buf3[0]
del buf3
return reinterpret_tensor(buf4, (4, 4, 4), (4, 16, 1), 0),
def linear(x):
return x
def activation(func_a):
"""Activation function wrapper
"""
try:
f = eval(func_a)
    except Exception:  # fall back to identity if the name does not resolve
f = linear
return f
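# Added, hedged illustration of the eval-based lookup above: a bare 'relu'
# is not a name in this module's globals, so it falls back to `linear`,
# while a qualified name such as 'F.relu' resolves to the imported function.
def _activation_lookup_example():
    assert activation('relu') is linear
    assert activation('F.relu') is F.relu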
class DropoutWrapper(nn.Module):
"""
    This is a dropout wrapper which supports fixed-mask (variational) dropout
"""
def __init__(self, dropout_p=0, enable_vbp=True):
super(DropoutWrapper, self).__init__()
"""variational dropout means fix dropout mask
ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
"""
self.enable_variational_dropout = enable_vbp
self.dropout_p = dropout_p
def forward(self, x):
"""
:param x: batch * len * input_size
"""
if self.training is False or self.dropout_p == 0:
return x
if len(x.size()) == 3:
            keep_prob = 1 - self.dropout_p
            mask = 1.0 / keep_prob * torch.bernoulli(keep_prob * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
mask.requires_grad = False
return mask.unsqueeze(1).expand_as(x) * x
else:
return F.dropout(x, p=self.dropout_p, training=self.training)
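# Added sketch (hedged): the 3-D branch above samples one Bernoulli mask of
# shape (batch, input_size) and broadcasts it across the length dimension,
# so every time step of a sequence is dropped identically (a "fixed mask").
def _variational_dropout_example():
    drop = DropoutWrapper(dropout_p=0.5)
    drop.train()
    y = drop(torch.ones(2, 7, 4))
    # Each (batch, feature) column is all zeros or all scaled by 1/(1-p)=2.
    assert torch.equal(y[:, 0, :] == 0, y[:, -1, :] == 0)
    return y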
class MultiheadAttentionWrapperNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, query_dim, key_dim, value_dim, prefix='attention',
opt={}, dropout=None):
super().__init__()
self.prefix = prefix
self.num_heads = opt.get('{}_head'.format(self.prefix), 1)
self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self.
prefix), 0)) if dropout is None else dropout
self.qkv_dim = [query_dim, key_dim, value_dim]
        assert query_dim == key_dim, 'query dim must equal key dim'
self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
self.proj_on = opt.get('{}_proj_on'.format(prefix), False)
self.share = opt.get('{}_share'.format(self.prefix), False)
self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
if self.proj_on:
            self.proj_modules = nn.ModuleList([nn.Linear(dim, self.hidden_size) for dim in self.qkv_dim[0:2]])
            if self.layer_norm_on:
                # weight_norm returns a new module; assign it back so the
                # ModuleList actually holds the wrapped projection (the
                # original loop discarded the result).
                for i in range(len(self.proj_modules)):
                    self.proj_modules[i] = weight_norm(self.proj_modules[i])
            if self.share and self.qkv_dim[0] == self.qkv_dim[1]:
                self.proj_modules[1] = self.proj_modules[0]
self.f = activation(opt.get('{}_activation'.format(self.prefix),
'relu'))
self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3
self.qkv_head_dim[2] = value_dim // self.num_heads
            assert self.qkv_head_dim[0] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads'
            assert self.qkv_head_dim[2] * self.num_heads == value_dim, 'value size must be divisible by num_heads'
else:
            self.qkv_head_dim = [emb // self.num_heads for emb in self.qkv_dim]
            assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0], 'query size must be divisible by num_heads'
            assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1], 'key size must be divisible by num_heads'
            assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2], 'value size must be divisible by num_heads'
if self.scale_on:
self.scaling = self.qkv_head_dim[0] ** -0.5
self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix),
False)
self.output_size = self.qkv_dim[2]
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| brightgems/BartWithRL | MultiheadAttentionWrapper | false | 6,368 | [
"MIT"
] | 1 | 17614c4009ec976cdc73dacaf94573a6d8f6d529 | https://github.com/brightgems/BartWithRL/tree/17614c4009ec976cdc73dacaf94573a6d8f6d529 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from torch.optim.lr_scheduler import *
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
def linear(x):
return x
def activation(func_a):
"""Activation function wrapper
"""
try:
f = eval(func_a)
    except Exception:  # fall back to identity if the name does not resolve
f = linear
return f
class DropoutWrapper(nn.Module):
"""
    This is a dropout wrapper which supports fixed-mask (variational) dropout
"""
def __init__(self, dropout_p=0, enable_vbp=True):
super().__init__()
"""variational dropout means fix dropout mask
ref: https://discuss.pytorch.org/t/dropout-for-rnns/633/11
"""
self.enable_variational_dropout = enable_vbp
self.dropout_p = dropout_p
def forward(self, x):
"""
:param x: batch * len * input_size
"""
if self.training is False or self.dropout_p == 0:
return x
if len(x.size()) == 3:
            keep_prob = 1 - self.dropout_p
            mask = 1.0 / keep_prob * torch.bernoulli(keep_prob * (x.data.new(x.size(0), x.size(2)).zero_() + 1))
mask.requires_grad = False
return mask.unsqueeze(1).expand_as(x) * x
else:
return F.dropout(x, p=self.dropout_p, training=self.training)
class Model(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, query_dim, key_dim, value_dim, prefix='attention',
opt={}, dropout=None):
super().__init__()
self.prefix = prefix
self.num_heads = opt.get('{}_head'.format(self.prefix), 1)
self.dropout = DropoutWrapper(opt.get('{}_dropout'.format(self.
prefix), 0)) if dropout is None else dropout
self.qkv_dim = [query_dim, key_dim, value_dim]
        assert query_dim == key_dim, 'query dim must equal key dim'
self.hidden_size = opt.get('{}_hidden_size'.format(self.prefix), 64)
self.proj_on = opt.get('{}_proj_on'.format(prefix), False)
self.share = opt.get('{}_share'.format(self.prefix), False)
self.layer_norm_on = opt.get('{}_norm_on'.format(self.prefix), False)
self.scale_on = opt.get('{}_scale_on'.format(self.prefix), False)
if self.proj_on:
            self.proj_modules = nn.ModuleList([nn.Linear(dim, self.hidden_size) for dim in self.qkv_dim[0:2]])
            if self.layer_norm_on:
                # weight_norm returns a new module; assign it back so the
                # ModuleList actually holds the wrapped projection (the
                # original loop discarded the result).
                for i in range(len(self.proj_modules)):
                    self.proj_modules[i] = weight_norm(self.proj_modules[i])
            if self.share and self.qkv_dim[0] == self.qkv_dim[1]:
                self.proj_modules[1] = self.proj_modules[0]
self.f = activation(opt.get('{}_activation'.format(self.prefix),
'relu'))
self.qkv_head_dim = [self.hidden_size // self.num_heads] * 3
self.qkv_head_dim[2] = value_dim // self.num_heads
            assert self.qkv_head_dim[0] * self.num_heads == self.hidden_size, 'hidden size must be divisible by num_heads'
            assert self.qkv_head_dim[2] * self.num_heads == value_dim, 'value size must be divisible by num_heads'
else:
            self.qkv_head_dim = [emb // self.num_heads for emb in self.qkv_dim]
            assert self.qkv_head_dim[0] * self.num_heads == self.qkv_dim[0], 'query size must be divisible by num_heads'
            assert self.qkv_head_dim[1] * self.num_heads == self.qkv_dim[1], 'key size must be divisible by num_heads'
            assert self.qkv_head_dim[2] * self.num_heads == self.qkv_dim[2], 'value size must be divisible by num_heads'
if self.scale_on:
self.scaling = self.qkv_head_dim[0] ** -0.5
self.drop_diagonal = opt.get('{}_drop_diagonal'.format(self.prefix),
False)
self.output_size =
# ... truncated (>4000 chars) for memory efficiency |
CNNCifar | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gd/cgdmy4ginhdapyqwcubd4toeeysvtbsqefmslx573dquq6rbsilp.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 102400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 400) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gs/cgs3id7lhegk6e2zuodynkbioma7ch6ubjwbruprffk7ti3myfel.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = (xindex // 10)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (40*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (40*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (20 + (2*x0) + (40*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (21 + (2*x0) + (40*x1)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
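# Added note (hedged): the int8 buffer written by the kernel above encodes
# which of the four slots in each 2x2 pooling window held the max (0..3 in
# row-major order); these are the offsets max_pool2d_with_indices keeps for
# the backward pass.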
# kernel path: runs/run_shard_4/inductor_cache/yl/cylhzvm5azwoxpfzq6cmn4wrmtveszsjfaehpp6d3sldzewuqoy4.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 36) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kh/ckhx3mew2uyvhvz23bcgy3i5ochwmbzn7w2bv2x6pye4ss5plgn2.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (12*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (12*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (6 + (2*x0) + (12*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (7 + (2*x0) + (12*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ra/cramkcr2zx6wgnivm2zwijj7bp55vteteb62w7tmiei4s3irrabk.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qv/cqvjks2av4xz4prvu5ycoopkchxieqgdi4ptsrqgutk3r72rmc62.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 24, 24), (1728, 576, 24, 1))
assert_size_stride(primals_4, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (384, 576), (576, 1))
assert_size_stride(primals_7, (384, ), (1, ))
assert_size_stride(primals_8, (192, 384), (384, 1))
assert_size_stride(primals_9, (192, ), (1, ))
assert_size_stride(primals_10, (4, 192), (192, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 20, 20), (25600, 400, 20, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 102400, grid=grid(102400), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1), torch.float32)
buf3 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 25600, grid=grid(25600), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 6, 6), (2304, 36, 6, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 9216, grid=grid(9216), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 64, 3, 3), (576, 9, 3, 1), torch.int8)
buf7 = empty_strided_cuda((4, 64, 3, 3), (576, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 2304, grid=grid(2304), stream=stream0)
buf8 = empty_strided_cuda((4, 384), (384, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 576), (576, 1), 0), reinterpret_tensor(primals_6, (576, 384), (1, 576), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 1536, grid=grid(1536), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 192), (192, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (384, 192), (1, 384), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 768, grid=grid(768), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (192, 4), (1, 192), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 576), (576, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 24, 24), (1728, 576, 24, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((384, 576), (576, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((384, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((192, 384), (384, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 192), (192, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.nn.functional as F
class CNNCifar(nn.Module):
def __init__(self, args):
super(CNNCifar, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(64, 64, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(64 * 3 * 3, 384)
self.fc2 = nn.Linear(384, 192)
self.fc3 = nn.Linear(192, args.num_classes)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = x.view(-1, 64 * 3 * 3)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
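# Added shape walkthrough (a sketch, not from the original repo) for the
# 24x24 inputs used below: 24 -> conv 5x5 -> 20 -> pool 2x2 -> 10 ->
# conv 5x5 -> 6 -> pool 2x2 -> 3, so the flattened feature size is
# 64 * 3 * 3 = 576, matching fc1's input dimension.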
def get_inputs():
return [torch.rand([4, 3, 24, 24])]
def get_init_inputs():
return [[], {'args': _mock_config(num_classes=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 400 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = xindex // 10
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 40 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 40 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (20 + 2 * x0 + 40 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (21 + 2 * x0 + 40 * x1), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 36 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 12 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 12 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (6 + 2 * x0 + 12 * x1), xmask, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (7 + 2 * x0 + 12 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 24, 24), (1728, 576, 24, 1))
assert_size_stride(primals_4, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (384, 576), (576, 1))
assert_size_stride(primals_7, (384,), (1,))
assert_size_stride(primals_8, (192, 384), (384, 1))
assert_size_stride(primals_9, (192,), (1,))
assert_size_stride(primals_10, (4, 192), (192, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 20, 20), (25600, 400, 20, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(102400)](buf1, primals_2,
102400, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(25600)](buf1, buf2,
buf3, 25600, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 6, 6), (2304, 36, 6, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(9216)](buf5, primals_5,
9216, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 64, 3, 3), (576, 9, 3, 1), torch.int8)
buf7 = empty_strided_cuda((4, 64, 3, 3), (576, 9, 3, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(2304)](buf5, buf6,
buf7, 2304, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 384), (384, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 576), (576, 1), 0),
reinterpret_tensor(primals_6, (576, 384), (1, 576), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(1536)](buf9, primals_7, 1536, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 192), (192, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (384, 192), (
1, 384), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(768)](buf11, primals_9, 768, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (192, 4), (1, 192), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 576), (576, 1), 0), buf9, buf11,
primals_10, primals_8, primals_6)
class CNNCifarNew(nn.Module):
def __init__(self, args):
super(CNNCifarNew, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(64, 64, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(64 * 3 * 3, 384)
self.fc2 = nn.Linear(384, 192)
self.fc3 = nn.Linear(192, args.num_classes)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| bobvo23/Federated-Learning-PyTorch | CNNCifar | false | 6,369 | [
"MIT"
] | 1 | e5cffe8f39cfad76c13c78b9f1c6ef0976e4cc81 | https://github.com/bobvo23/Federated-Learning-PyTorch/tree/e5cffe8f39cfad76c13c78b9f1c6ef0976e4cc81 | from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, args):
super().__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(64, 64, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(64 * 3 * 3, 384)
self.fc2 = nn.Linear(384, 192)
self.fc3 = nn.Linear(192, args.num_classes)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = x.view(-1, 64 * 3 * 3)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 24, 24])]
def get_init_inputs():
return []
|
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bn/cbnew222tzmfnb6gyjabhnjtqqg4g3zgcay65lkbdxe3r5dtwlns.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hc/chcijhyx3lu745bcg6v7r4hf3yavmj7r7bm3wkq6rxzlxn7d6d2q.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_3 => tanh_1
# Graph fragment:
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, 8192, grid=grid(8192), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_3, (128, 4), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf3, 256, grid=grid(256), stream=stream0)
return (buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf1, buf3, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as th
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, input_size, output_size, hidden=128):
super(MLP, self).__init__()
self.linear1 = nn.Linear(input_size, hidden, bias=False)
self.linear2 = nn.Linear(hidden, output_size, bias=False)
def forward(self, x):
x = self.linear1(x)
x = th.tanh(x)
x = self.linear2(x)
x = th.tanh(x)
return x
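# Added usage note (hedged): both linear layers are bias-free, so the model
# is a composition of two tanh-squashed matrix products; any
# (..., input_size) tensor maps to the same leading shape with output_size
# features in (-1, 1), e.g.
# Model(4, 4)(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4, 4).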
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, None)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(8192)](buf1, 8192, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_3, (128, 4), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_1[grid(256)](buf3, 256, XBLOCK=256, num_warps
=4, num_stages=1)
return buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf1, buf3, primals_3
class MLPNew(nn.Module):
def __init__(self, input_size, output_size, hidden=128):
super(MLPNew, self).__init__()
self.linear1 = nn.Linear(input_size, hidden, bias=False)
self.linear2 = nn.Linear(hidden, output_size, bias=False)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_3 = self.linear2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| bwubrian/cherry | MLP | false | 6,370 | [
"Apache-2.0"
] | 1 | de0cd2d833336144bce2a0b97e4dad40cbd78d7c | https://github.com/bwubrian/cherry/tree/de0cd2d833336144bce2a0b97e4dad40cbd78d7c | import torch
import torch as th
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, output_size, hidden=128):
super().__init__()
self.linear1 = nn.Linear(input_size, hidden, bias=False)
self.linear2 = nn.Linear(hidden, output_size, bias=False)
def forward(self, x):
x = self.linear1(x)
x = th.tanh(x)
x = self.linear2(x)
x = th.tanh(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Parseval_Conv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3n/c3nfeatzmd3jrrzexkoxor6y5icxdzoidhpoxg4f7nv2kjveupsf.py
# Topologically Sorted Source Nodes: [wrapped_sqrt, new_weight], Original ATen: [aten.sqrt, aten.div]
# Source node to ATen node mapping:
# new_weight => div
# wrapped_sqrt => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 5.744562646538029), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %full_default), kwargs = {})
triton_poi_fused_div_sqrt_0 = async_compile.triton('triton_poi_fused_div_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
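    # 5.744562646538029 == sqrt(2 * 4 * 4 + 1) == sqrt(33): the Parseval rescaling
    # for this 4x4 kernel, constant-folded by the compiler.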
tmp1 = 5.744562646538029
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wrapped_sqrt, new_weight], Original ATen: [aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sqrt_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class Parseval_Conv2d(nn.Conv2d):
def forward(self, input):
        new_weight = self.weight / np.sqrt(
            2 * self.kernel_size[0] * self.kernel_size[1] + 1)
        return F.conv2d(input, new_weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
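# Sanity check (illustrative, not from the source repo): for kernel_size=4 the
# divisor is np.sqrt(2 * 4 * 4 + 1) ~= 5.7446, exactly the constant baked into
# the Triton kernel above.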
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sqrt_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
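    # sqrt(2 * 4 * 4 + 1) == sqrt(33), the Parseval divisor folded to a constant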
tmp1 = 5.744562646538029
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sqrt_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_3, buf0
class Parseval_Conv2dNew(nn.Conv2d):
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| cadurosar/laplacian_networks | Parseval_Conv2d | false | 6,371 | [
"MIT"
] | 1 | 27f6f2d7145426b38f578e9c1beecae3e7392f1b | https://github.com/cadurosar/laplacian_networks/tree/27f6f2d7145426b38f578e9c1beecae3e7392f1b | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Conv2d):
def forward(self, input):
        new_weight = self.weight / np.sqrt(
            2 * self.kernel_size[0] * self.kernel_size[1] + 1)
        return F.conv2d(input, new_weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
SuperLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bw/cbwlw4mjc3qoiqwgmvj223as6fy3sq35zfln2hov5cibpe6tvt5k.py
# Topologically Sorted Source Nodes: [sub, pow_1, loss, mul, TotalLossVal], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul, aten.add]
# Source node to ATen node mapping:
# TotalLossVal => add
# loss => mean
# mul => mul
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.0), kwargs = {})
triton_per_fused_add_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = 0.0
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, loss, mul, TotalLossVal], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_sub_0.run(buf1, arg0_1, arg1_1, buf2, buf3, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, buf2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
import torch
class netMSELoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target):
return self.computeLoss(output, target)
def computeLoss(self, output, target):
loss = torch.mean((output - target) ** 2)
return loss
class SuperLoss(nn.Module):
def __init__(self, Losses=[], Weights=[], Names=[]):
super().__init__()
if not Losses:
self.Losses = [netMSELoss()]
self.Weights = [1.0]
self.Names = ['Default MSE Loss']
else:
if len(Losses) != len(Weights):
raise RuntimeError(
                    'SuperLoss(): Losses and Weights lengths do not match.')
self.Losses = Losses
self.Weights = Weights
            self.Names = ['Subloss ' + str(i).zfill(2)
                          for i in range(len(self.Losses))]
for Ctr, n in enumerate(Names, 0):
self.Names[Ctr] = n
self.cleanUp()
def __len__(self):
return len(self.Losses)
def getItems(self, withoutWeights=False):
RetLossValsFloat = []
if withoutWeights:
for v in self.LossVals:
RetLossValsFloat.append(v.item())
else:
for v in self.LossValsWeighted:
RetLossValsFloat.append(v.item())
return RetLossValsFloat
def cleanUp(self):
self.LossVals = [0.0] * len(self.Losses)
self.LossValsWeighted = [0.0] * len(self.Losses)
def forward(self, output, target):
self.cleanUp()
return self.computeLoss(output, target)
def computeLoss(self, output, target):
TotalLossVal = 0.0
for Ctr, (l, w) in enumerate(zip(self.Losses, self.Weights), 0):
LossVal = l.forward(output, target)
self.LossVals[Ctr] = LossVal
self.LossValsWeighted[Ctr] = w * LossVal
TotalLossVal += self.LossValsWeighted[Ctr]
return TotalLossVal
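# Minimal usage sketch (illustrative, not from the source repo):
#   crit = SuperLoss()                 # defaults to a single MSE subloss, weight 1.0
#   total = crit(output, target)       # forward() resets state, then sums w_i * L_i
#   weighted = crit.getItems()         # per-subloss weighted values as plain floats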
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = 0.0
tmp12 = tmp10 + tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1,
arg1_1, buf2, buf3, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3, buf2, buf1
class netMSELoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target):
return self.computeLoss(output, target)
def computeLoss(self, output, target):
loss = torch.mean((output - target) ** 2)
return loss
class SuperLossNew(nn.Module):
def __init__(self, Losses=[], Weights=[], Names=[]):
super().__init__()
if not Losses:
self.Losses = [netMSELoss()]
self.Weights = [1.0]
self.Names = ['Default MSE Loss']
else:
if len(Losses) != len(Weights):
raise RuntimeError(
                    'SuperLoss(): Losses and Weights lengths do not match.')
self.Losses = Losses
self.Weights = Weights
            self.Names = ['Subloss ' + str(i).zfill(2)
                          for i in range(len(self.Losses))]
for Ctr, n in enumerate(Names, 0):
self.Names[Ctr] = n
self.cleanUp()
def __len__(self):
return len(self.Losses)
def getItems(self, withoutWeights=False):
RetLossValsFloat = []
if withoutWeights:
for v in self.LossVals:
RetLossValsFloat.append(v.item())
else:
for v in self.LossValsWeighted:
RetLossValsFloat.append(v.item())
return RetLossValsFloat
def cleanUp(self):
self.LossVals = [0.0] * len(self.Losses)
self.LossValsWeighted = [0.0] * len(self.Losses)
def computeLoss(self, output, target):
TotalLossVal = 0.0
for Ctr, (l, w) in enumerate(zip(self.Losses, self.Weights), 0):
LossVal = l.forward(output, target)
self.LossVals[Ctr] = LossVal
self.LossValsWeighted[Ctr] = w * LossVal
TotalLossVal += self.LossValsWeighted[Ctr]
return TotalLossVal
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| brown-ivl/beacon | SuperLoss | false | 6,372 | [
"MIT"
] | 1 | 66a1714473b362294f787f261561e39c52f00e42 | https://github.com/brown-ivl/beacon/tree/66a1714473b362294f787f261561e39c52f00e42 | import torch
import torch.utils.data
from torch import nn
import torch
class netMSELoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target):
return self.computeLoss(output, target)
def computeLoss(self, output, target):
loss = torch.mean((output - target) ** 2)
return loss
class Model(nn.Module):
def __init__(self, Losses=[], Weights=[], Names=[]):
super().__init__()
if not Losses:
self.Losses = [netMSELoss()]
self.Weights = [1.0]
self.Names = ['Default MSE Loss']
else:
if len(Losses) != len(Weights):
raise RuntimeError(
                    'SuperLoss(): Losses and Weights lengths do not match.')
self.Losses = Losses
self.Weights = Weights
            self.Names = ['Subloss ' + str(i).zfill(2)
                          for i in range(len(self.Losses))]
for Ctr, n in enumerate(Names, 0):
self.Names[Ctr] = n
self.cleanUp()
def __len__(self):
return len(self.Losses)
def getItems(self, withoutWeights=False):
RetLossValsFloat = []
if withoutWeights:
for v in self.LossVals:
RetLossValsFloat.append(v.item())
else:
for v in self.LossValsWeighted:
RetLossValsFloat.append(v.item())
return RetLossValsFloat
def cleanUp(self):
self.LossVals = [0.0] * len(self.Losses)
self.LossValsWeighted = [0.0] * len(self.Losses)
def forward(self, output, target):
self.cleanUp()
return self.computeLoss(output, target)
def computeLoss(self, output, target):
TotalLossVal = 0.0
for Ctr, (l, w) in enumerate(zip(self.Losses, self.Weights), 0):
LossVal = l.forward(output, target)
self.LossVals[Ctr] = LossVal
self.LossValsWeighted[Ctr] = w * LossVal
TotalLossVal += self.LossValsWeighted[Ctr]
return TotalLossVal
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Bicubic | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yu/cyumhrxnw3es3qen6qbgqg7arzzipbge5qfdw6h4pgo2pm4mgsbc.py
# Topologically Sorted Source Nodes: [bicubic_output], Original ATen: [aten.floor, aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten._unsafe_index, aten.clamp, aten.rsub]
# Source node to ATen node mapping:
# bicubic_output => _unsafe_index, _unsafe_index_1, _unsafe_index_10, _unsafe_index_11, _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, _unsafe_index_2, _unsafe_index_3, _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, _unsafe_index_8, _unsafe_index_9, add, add_10, add_11, add_12, add_13, add_14, add_15, add_16, add_17, add_18, add_19, add_20, add_21, add_22, add_23, add_24, add_25, add_26, add_27, add_28, add_29, add_30, add_6, add_7, add_8, add_9, clamp_max, clamp_max_1, clamp_min, clamp_min_1, convert_element_type_1, floor, floor_1, iota_1, mul, mul_10, mul_11, mul_12, mul_13, mul_14, mul_15, mul_16, mul_17, mul_18, mul_19, mul_2, mul_20, mul_21, mul_22, mul_23, mul_24, mul_25, mul_26, mul_27, mul_28, mul_29, mul_3, mul_30, mul_31, mul_32, mul_33, mul_34, mul_35, mul_36, mul_37, mul_38, mul_39, mul_4, mul_40, mul_41, mul_42, mul_43, mul_44, mul_45, mul_5, mul_6, mul_7, mul_8, mul_9, sub, sub_10, sub_11, sub_12, sub_13, sub_14, sub_15, sub_16, sub_17, sub_18, sub_19, sub_2, sub_20, sub_21, sub_3, sub_6, sub_7, sub_8, sub_9
# Graph fragment:
# %floor_1 : [num_users=2] = call_function[target=torch.ops.aten.floor.default](args = (%unsqueeze,), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %floor : [num_users=2] = call_function[target=torch.ops.aten.floor.default](args = (%sub,), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_2, %clamp_max_3]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %floor), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_1 : [num_users=6] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max_1, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, -0.75), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, -3.75), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %add_6), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, -6.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %add_6), kwargs = {})
# %sub_7 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, -3.0), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index, %sub_7), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, 1.25), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 2.25), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %clamp_max_1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %clamp_max_1), kwargs = {})
# %add_8 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %add_8), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, %mul_27), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_6, %clamp_max_7]), kwargs = {})
# %sub_9 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clamp_max_1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, 1.25), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_8, 2.25), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %sub_9), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %sub_9), kwargs = {})
# %add_9 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, 1), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_2, %add_9), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %mul_28), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %sub_11 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %clamp_max_1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, -0.75), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_11, -3.75), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %sub_11), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, -6.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, %sub_11), kwargs = {})
# %sub_13 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_13, -3.0), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_3, %sub_13), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %mul_29), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %floor_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max : [num_users=6] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {})
# %add_11 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max, 1.0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_11, -0.75), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_14, -3.75), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %add_11), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, -6.0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_12, %add_11), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_16, -3.0), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_18, %sub_15), kwargs = {})
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_10, %clamp_max_11]), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_4, %sub_7), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_5, %add_8), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_30, %mul_31), kwargs = {})
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_14, %clamp_max_15]), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_6, %add_9), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_19, %mul_32), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_16, %clamp_max_17]), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_7, %sub_13), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %mul_33), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 1.25), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_17, 2.25), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, %clamp_max), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, 1), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, %add_13), kwargs = {})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_42, %mul_43), kwargs = {})
# %_unsafe_index_8 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_18, %clamp_max_19]), kwargs = {})
# %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_8, %sub_7), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_20, %clamp_max_21]), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_9, %add_8), kwargs = {})
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_34, %mul_35), kwargs = {})
# %_unsafe_index_10 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_22, %clamp_max_23]), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_10, %add_9), kwargs = {})
# %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_22, %mul_36), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_24, %clamp_max_25]), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_11, %sub_13), kwargs = {})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_23, %mul_37), kwargs = {})
# %sub_17 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clamp_max), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, 1.25), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_20, 2.25), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %sub_17), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_21, %sub_17), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_22, 1), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_24, %add_14), kwargs = {})
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_28, %mul_44), kwargs = {})
# %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_26, %clamp_max_27]), kwargs = {})
# %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_12, %sub_7), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_28, %clamp_max_29]), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_13, %add_8), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_38, %mul_39), kwargs = {})
# %_unsafe_index_14 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_30, %clamp_max_31]), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_14, %add_9), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_40), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_32, %clamp_max_33]), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_15, %sub_13), kwargs = {})
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %mul_41), kwargs = {})
# %sub_19 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %clamp_max), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, -0.75), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_23, -3.75), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %sub_19), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_24, -6.0), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_15, %sub_19), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_25, -3.0), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_27, %sub_21), kwargs = {})
# %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_29, %mul_45), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0(in_out_ptr1, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = libdevice.floor(tmp5)
tmp7 = tmp6.to(tl.int32)
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp7 - tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp2
tmp18 = tmp17 - tmp2
tmp19 = libdevice.floor(tmp18)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 - tmp8
tmp22 = triton_helpers.maximum(tmp21, tmp10)
tmp23 = triton_helpers.minimum(tmp22, tmp12)
tmp24 = tl.load(in_ptr0 + (tmp23 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last')
tmp25 = tmp18 - tmp19
tmp26 = 0.0
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp29 + tmp28
tmp31 = -0.75
tmp32 = tmp30 * tmp31
tmp33 = -3.75
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp30
tmp36 = -6.0
tmp37 = tmp35 + tmp36
tmp38 = tmp37 * tmp30
tmp39 = -3.0
tmp40 = tmp38 - tmp39
tmp41 = tmp24 * tmp40
tmp42 = triton_helpers.maximum(tmp20, tmp10)
tmp43 = triton_helpers.minimum(tmp42, tmp12)
tmp44 = tl.load(in_ptr0 + (tmp43 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last')
tmp45 = 1.25
tmp46 = tmp29 * tmp45
tmp47 = 2.25
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp29
tmp50 = tmp49 * tmp29
tmp51 = tmp50 + tmp28
tmp52 = tmp44 * tmp51
tmp53 = tmp20 + tmp8
tmp54 = triton_helpers.maximum(tmp53, tmp10)
tmp55 = triton_helpers.minimum(tmp54, tmp12)
tmp56 = tl.load(in_ptr0 + (tmp55 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last')
tmp57 = tmp28 - tmp29
tmp58 = tmp57 * tmp45
tmp59 = tmp58 - tmp47
tmp60 = tmp59 * tmp57
tmp61 = tmp60 * tmp57
tmp62 = tmp61 + tmp28
tmp63 = tmp56 * tmp62
tmp64 = tl.full([1], 2, tl.int64)
tmp65 = tmp20 + tmp64
tmp66 = triton_helpers.maximum(tmp65, tmp10)
tmp67 = triton_helpers.minimum(tmp66, tmp12)
tmp68 = tl.load(in_ptr0 + (tmp67 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last')
tmp69 = 2.0
tmp70 = tmp69 - tmp29
tmp71 = tmp70 * tmp31
tmp72 = tmp71 - tmp33
tmp73 = tmp72 * tmp70
tmp74 = tmp73 + tmp36
tmp75 = tmp74 * tmp70
tmp76 = tmp75 - tmp39
tmp77 = tmp68 * tmp76
tmp78 = tmp41 + tmp52
tmp79 = tmp78 + tmp63
tmp80 = tmp79 + tmp77
tmp81 = tmp5 - tmp6
tmp82 = triton_helpers.maximum(tmp81, tmp26)
tmp83 = triton_helpers.minimum(tmp82, tmp28)
tmp84 = tmp83 + tmp28
tmp85 = tmp84 * tmp31
tmp86 = tmp85 - tmp33
tmp87 = tmp86 * tmp84
tmp88 = tmp87 + tmp36
tmp89 = tmp88 * tmp84
tmp90 = tmp89 - tmp39
tmp91 = tmp80 * tmp90
tmp92 = triton_helpers.maximum(tmp7, tmp10)
tmp93 = triton_helpers.minimum(tmp92, tmp12)
tmp94 = tl.load(in_ptr0 + (tmp23 + (4*tmp93) + (16*x2)), xmask, eviction_policy='evict_last')
tmp95 = tmp94 * tmp40
tmp96 = tl.load(in_ptr0 + (tmp43 + (4*tmp93) + (16*x2)), xmask, eviction_policy='evict_last')
tmp97 = tmp96 * tmp51
tmp98 = tl.load(in_ptr0 + (tmp55 + (4*tmp93) + (16*x2)), xmask, eviction_policy='evict_last')
tmp99 = tmp98 * tmp62
tmp100 = tl.load(in_ptr0 + (tmp67 + (4*tmp93) + (16*x2)), xmask, eviction_policy='evict_last')
tmp101 = tmp100 * tmp76
tmp102 = tmp7 + tmp8
tmp103 = triton_helpers.maximum(tmp102, tmp10)
tmp104 = triton_helpers.minimum(tmp103, tmp12)
tmp105 = tl.load(in_ptr0 + (tmp23 + (4*tmp104) + (16*x2)), xmask, eviction_policy='evict_last')
tmp106 = tmp105 * tmp40
tmp107 = tl.load(in_ptr0 + (tmp43 + (4*tmp104) + (16*x2)), xmask, eviction_policy='evict_last')
tmp108 = tmp107 * tmp51
tmp109 = tl.load(in_ptr0 + (tmp55 + (4*tmp104) + (16*x2)), xmask, eviction_policy='evict_last')
tmp110 = tmp109 * tmp62
tmp111 = tl.load(in_ptr0 + (tmp67 + (4*tmp104) + (16*x2)), xmask, eviction_policy='evict_last')
tmp112 = tmp111 * tmp76
tmp113 = tmp95 + tmp97
tmp114 = tmp113 + tmp99
tmp115 = tmp114 + tmp101
tmp116 = tmp83 * tmp45
tmp117 = tmp116 - tmp47
tmp118 = tmp117 * tmp83
tmp119 = tmp118 * tmp83
tmp120 = tmp119 + tmp28
tmp121 = tmp115 * tmp120
tmp122 = tmp91 + tmp121
tmp123 = tmp106 + tmp108
tmp124 = tmp123 + tmp110
tmp125 = tmp124 + tmp112
tmp126 = tmp28 - tmp83
tmp127 = tmp126 * tmp45
tmp128 = tmp127 - tmp47
tmp129 = tmp128 * tmp126
tmp130 = tmp129 * tmp126
tmp131 = tmp130 + tmp28
tmp132 = tmp125 * tmp131
tmp133 = tmp122 + tmp132
tmp134 = tmp7 + tmp64
tmp135 = triton_helpers.maximum(tmp134, tmp10)
tmp136 = triton_helpers.minimum(tmp135, tmp12)
tmp137 = tl.load(in_ptr0 + (tmp23 + (4*tmp136) + (16*x2)), xmask, eviction_policy='evict_last')
tmp138 = tmp137 * tmp40
tmp139 = tl.load(in_ptr0 + (tmp43 + (4*tmp136) + (16*x2)), xmask, eviction_policy='evict_last')
tmp140 = tmp139 * tmp51
tmp141 = tl.load(in_ptr0 + (tmp55 + (4*tmp136) + (16*x2)), xmask, eviction_policy='evict_last')
tmp142 = tmp141 * tmp62
tmp143 = tl.load(in_ptr0 + (tmp67 + (4*tmp136) + (16*x2)), xmask, eviction_policy='evict_last')
tmp144 = tmp143 * tmp76
tmp145 = tmp138 + tmp140
tmp146 = tmp145 + tmp142
tmp147 = tmp146 + tmp144
tmp148 = tmp69 - tmp83
tmp149 = tmp148 * tmp31
tmp150 = tmp149 - tmp33
tmp151 = tmp150 * tmp148
tmp152 = tmp151 + tmp36
tmp153 = tmp152 * tmp148
tmp154 = tmp153 - tmp39
tmp155 = tmp147 * tmp154
tmp156 = tmp133 + tmp155
tl.store(in_out_ptr1 + (x3), tmp156, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf13 = buf10; del buf10 # reuse
buf19 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [bicubic_output], Original ATen: [aten.floor, aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten._unsafe_index, aten.clamp, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0.run(buf19, arg0_1, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (buf19, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Bicubic(nn.Module):
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, inputs):
        bicubic_output = F.interpolate(inputs,
            scale_factor=self.scale_factor, mode='bicubic')
return bicubic_output
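# With scale_factor=2 the (4, 4, 4, 4) test input below becomes (4, 4, 8, 8),
# matching the output buffer the compiled kernel allocates.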
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
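# Fully unrolled 2x bicubic upsample (4x4 -> 8x8) over all 16 source taps.
# The polynomial weights below are the Keys cubic-convolution kernel with
# a = -0.75 (PyTorch's bicubic default):
#   |x| <= 1:     ((1.25 * |x| - 2.25) * |x|) * |x| + 1
#   1 < |x| <= 2: ((-0.75 * |x| + 3.75) * |x| - 6.0) * |x| + 3.0
# Source indices are clamped to [0, 3], i.e. border pixels are replicated.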
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0(
in_out_ptr1, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = libdevice.floor(tmp5)
tmp7 = tmp6.to(tl.int32)
tmp8 = tl.full([1], 1, tl.int64)
tmp9 = tmp7 - tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp2
tmp18 = tmp17 - tmp2
tmp19 = libdevice.floor(tmp18)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 - tmp8
tmp22 = triton_helpers.maximum(tmp21, tmp10)
tmp23 = triton_helpers.minimum(tmp22, tmp12)
tmp24 = tl.load(in_ptr0 + (tmp23 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tmp18 - tmp19
tmp26 = 0.0
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp29 + tmp28
tmp31 = -0.75
tmp32 = tmp30 * tmp31
tmp33 = -3.75
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp30
tmp36 = -6.0
tmp37 = tmp35 + tmp36
tmp38 = tmp37 * tmp30
tmp39 = -3.0
tmp40 = tmp38 - tmp39
tmp41 = tmp24 * tmp40
tmp42 = triton_helpers.maximum(tmp20, tmp10)
tmp43 = triton_helpers.minimum(tmp42, tmp12)
tmp44 = tl.load(in_ptr0 + (tmp43 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp45 = 1.25
tmp46 = tmp29 * tmp45
tmp47 = 2.25
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp29
tmp50 = tmp49 * tmp29
tmp51 = tmp50 + tmp28
tmp52 = tmp44 * tmp51
tmp53 = tmp20 + tmp8
tmp54 = triton_helpers.maximum(tmp53, tmp10)
tmp55 = triton_helpers.minimum(tmp54, tmp12)
tmp56 = tl.load(in_ptr0 + (tmp55 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp57 = tmp28 - tmp29
tmp58 = tmp57 * tmp45
tmp59 = tmp58 - tmp47
tmp60 = tmp59 * tmp57
tmp61 = tmp60 * tmp57
tmp62 = tmp61 + tmp28
tmp63 = tmp56 * tmp62
tmp64 = tl.full([1], 2, tl.int64)
tmp65 = tmp20 + tmp64
tmp66 = triton_helpers.maximum(tmp65, tmp10)
tmp67 = triton_helpers.minimum(tmp66, tmp12)
tmp68 = tl.load(in_ptr0 + (tmp67 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp69 = 2.0
tmp70 = tmp69 - tmp29
tmp71 = tmp70 * tmp31
tmp72 = tmp71 - tmp33
tmp73 = tmp72 * tmp70
tmp74 = tmp73 + tmp36
tmp75 = tmp74 * tmp70
tmp76 = tmp75 - tmp39
tmp77 = tmp68 * tmp76
tmp78 = tmp41 + tmp52
tmp79 = tmp78 + tmp63
tmp80 = tmp79 + tmp77
tmp81 = tmp5 - tmp6
tmp82 = triton_helpers.maximum(tmp81, tmp26)
tmp83 = triton_helpers.minimum(tmp82, tmp28)
tmp84 = tmp83 + tmp28
tmp85 = tmp84 * tmp31
tmp86 = tmp85 - tmp33
tmp87 = tmp86 * tmp84
tmp88 = tmp87 + tmp36
tmp89 = tmp88 * tmp84
tmp90 = tmp89 - tmp39
tmp91 = tmp80 * tmp90
tmp92 = triton_helpers.maximum(tmp7, tmp10)
tmp93 = triton_helpers.minimum(tmp92, tmp12)
tmp94 = tl.load(in_ptr0 + (tmp23 + 4 * tmp93 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp95 = tmp94 * tmp40
tmp96 = tl.load(in_ptr0 + (tmp43 + 4 * tmp93 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp97 = tmp96 * tmp51
tmp98 = tl.load(in_ptr0 + (tmp55 + 4 * tmp93 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp99 = tmp98 * tmp62
tmp100 = tl.load(in_ptr0 + (tmp67 + 4 * tmp93 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp101 = tmp100 * tmp76
tmp102 = tmp7 + tmp8
tmp103 = triton_helpers.maximum(tmp102, tmp10)
tmp104 = triton_helpers.minimum(tmp103, tmp12)
tmp105 = tl.load(in_ptr0 + (tmp23 + 4 * tmp104 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp106 = tmp105 * tmp40
tmp107 = tl.load(in_ptr0 + (tmp43 + 4 * tmp104 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp108 = tmp107 * tmp51
tmp109 = tl.load(in_ptr0 + (tmp55 + 4 * tmp104 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp110 = tmp109 * tmp62
tmp111 = tl.load(in_ptr0 + (tmp67 + 4 * tmp104 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp112 = tmp111 * tmp76
tmp113 = tmp95 + tmp97
tmp114 = tmp113 + tmp99
tmp115 = tmp114 + tmp101
tmp116 = tmp83 * tmp45
tmp117 = tmp116 - tmp47
tmp118 = tmp117 * tmp83
tmp119 = tmp118 * tmp83
tmp120 = tmp119 + tmp28
tmp121 = tmp115 * tmp120
tmp122 = tmp91 + tmp121
tmp123 = tmp106 + tmp108
tmp124 = tmp123 + tmp110
tmp125 = tmp124 + tmp112
tmp126 = tmp28 - tmp83
tmp127 = tmp126 * tmp45
tmp128 = tmp127 - tmp47
tmp129 = tmp128 * tmp126
tmp130 = tmp129 * tmp126
tmp131 = tmp130 + tmp28
tmp132 = tmp125 * tmp131
tmp133 = tmp122 + tmp132
tmp134 = tmp7 + tmp64
tmp135 = triton_helpers.maximum(tmp134, tmp10)
tmp136 = triton_helpers.minimum(tmp135, tmp12)
tmp137 = tl.load(in_ptr0 + (tmp23 + 4 * tmp136 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp138 = tmp137 * tmp40
tmp139 = tl.load(in_ptr0 + (tmp43 + 4 * tmp136 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp140 = tmp139 * tmp51
tmp141 = tl.load(in_ptr0 + (tmp55 + 4 * tmp136 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp142 = tmp141 * tmp62
tmp143 = tl.load(in_ptr0 + (tmp67 + 4 * tmp136 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp144 = tmp143 * tmp76
tmp145 = tmp138 + tmp140
tmp146 = tmp145 + tmp142
tmp147 = tmp146 + tmp144
tmp148 = tmp69 - tmp83
tmp149 = tmp148 * tmp31
tmp150 = tmp149 - tmp33
tmp151 = tmp150 * tmp148
tmp152 = tmp151 + tmp36
tmp153 = tmp152 * tmp148
tmp154 = tmp153 - tmp39
tmp155 = tmp147 * tmp154
tmp156 = tmp133 + tmp155
tl.store(in_out_ptr1 + x3, tmp156, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
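        # buf10 -> buf13 -> buf19 is Inductor buffer reuse: the kernel accumulates
        # in place through in_out_ptr1, so a single (4, 4, 8, 8) allocation suffices.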
        buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf13 = buf10
del buf10
buf19 = buf13
del buf13
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0[
grid(1024)](buf19, arg0_1, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
del arg0_1
return buf19,
class BicubicNew(nn.Module):
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| bui-thanh-lam/image-super-resolution | Bicubic | false | 6,373 | [
"BSD-2-Clause"
] | 1 | 8eee69c9fdd3aaf760fabfb5a294f083c7ddf4ac | https://github.com/bui-thanh-lam/image-super-resolution/tree/8eee69c9fdd3aaf760fabfb5a294f083c7ddf4ac | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, scale_factor=2):
super().__init__()
self.scale_factor = scale_factor
def forward(self, inputs):
        bicubic_output = F.interpolate(inputs,
            scale_factor=self.scale_factor, mode='bicubic')
return bicubic_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FCBottleNeck | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zy/czylxf6rfbnbz2ddgd3xovxwjqnfen7sgqej5mnv46j2fekwnniz.py
# Topologically Sorted Source Nodes: [x_pe], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_pe => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2048, 4), (4, 1))
assert_size_stride(primals_3, (2048, ), (1, ))
assert_size_stride(primals_4, (2048, 2048), (2048, 1))
assert_size_stride(primals_5, (2048, ), (1, ))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_pe], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf6, 131072, grid=grid(131072), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_pe_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 131072, grid=grid(131072), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_pe_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_6, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2048, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
import torch
class FCBottleNeck(nn.Module):
def __init__(self, InFeatureSize):
super().__init__()
self.FC1 = nn.Linear(InFeatureSize, 2048)
self.FC2 = nn.Linear(2048, 2048)
self.FC3 = nn.Linear(2048, InFeatureSize)
def forward(self, x):
x_pe = x
x_pe = F.relu(self.FC1(x_pe))
x_pe = F.relu(self.FC2(x_pe))
x_pe = self.FC3(x_pe)
return x_pe
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'InFeatureSize': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
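# Reference semantics for the kernel above (a descriptive sketch, not part of
# the generated output): each element becomes out = relu(x + bias[col]),
# written back in place, and the boolean mask (out <= 0) is stored for
# autograd's threshold_backward, which zeroes incoming gradients wherever the
# ReLU was inactive. In eager PyTorch this is roughly:
#     y = torch.relu(x + bias)
#     grad_mask = y <= 0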
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2048, 4), (4, 1))
assert_size_stride(primals_3, (2048,), (1,))
assert_size_stride(primals_4, (2048, 2048), (2048, 1))
assert_size_stride(primals_5, (2048,), (1,))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048,
1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf1,
primals_3, buf6, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0
), reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2048), (32768, 8192, 2048,
1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf3,
primals_5, buf5, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 2048),
(2048, 1), 0), reinterpret_tensor(primals_6, (2048, 4), (1,
2048), 0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0
), reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0
), primals_6, buf5, primals_4, buf6
class FCBottleNeckNew(nn.Module):
def __init__(self, InFeatureSize):
super().__init__()
self.FC1 = nn.Linear(InFeatureSize, 2048)
self.FC2 = nn.Linear(2048, 2048)
self.FC3 = nn.Linear(2048, InFeatureSize)
def forward(self, input_0):
primals_2 = self.FC1.weight
primals_3 = self.FC1.bias
primals_4 = self.FC2.weight
primals_5 = self.FC2.bias
primals_6 = self.FC3.weight
primals_7 = self.FC3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| brown-ivl/beacon | FCBottleNeck | false | 6,374 | [
"MIT"
] | 1 | 66a1714473b362294f787f261561e39c52f00e42 | https://github.com/brown-ivl/beacon/tree/66a1714473b362294f787f261561e39c52f00e42 | import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
import torch
class Model(nn.Module):
def __init__(self, InFeatureSize):
super().__init__()
self.FC1 = nn.Linear(InFeatureSize, 2048)
self.FC2 = nn.Linear(2048, 2048)
self.FC3 = nn.Linear(2048, InFeatureSize)
def forward(self, x):
x_pe = x
x_pe = F.relu(self.FC1(x_pe))
x_pe = F.relu(self.FC2(x_pe))
x_pe = self.FC3(x_pe)
return x_pe
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
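# A minimal shape check (added for illustration, not from the repo): the
# bottleneck maps (..., InFeatureSize) -> 2048 -> 2048 -> InFeatureSize, so
# the output shape matches the input shape.
if __name__ == '__main__':
    net = Model(4)
    out = net(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 4, 4, 4)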
|
CustomizedNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zb/czbrdc6746xv7kfxrqkzgbhm74ijdfuyfd3sz3llzzwzm6wzxmfi.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.distributed
class CustomizedNet(nn.Module):
def __init__(self, dropout, input_size, input_feature_num, hidden_dim,
output_size):
"""
        Uses simple linear layers for multivariate single-step forecasting.
"""
super().__init__()
self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_dim, output_size)
def forward(self, x):
x = x.view(-1, x.shape[1] * x.shape[2])
x = self.fc1(x)
x = self.dropout(x)
x = self.relu1(x)
x = self.fc2(x)
x = torch.unsqueeze(x, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dropout': 0.5, 'input_size': 4, 'input_feature_num': 4,
'hidden_dim': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1),
0), reinterpret_tensor(primals_2, (16, 4), (1, 16), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (16, 1, 4), (4, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), buf1, primals_4
class CustomizedNetNew(nn.Module):
def __init__(self, dropout, input_size, input_feature_num, hidden_dim,
output_size):
"""
        Uses simple linear layers for multivariate single-step forecasting.
"""
super().__init__()
self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_dim, output_size)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| cabuliwallah/analytics-zoo | CustomizedNet | false | 6,375 | [
"Apache-2.0"
] | 1 | 5e662bd01c5fc7eed412973119594cf2ecea8b11 | https://github.com/cabuliwallah/analytics-zoo/tree/5e662bd01c5fc7eed412973119594cf2ecea8b11 | import torch
import torch.nn as nn
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self, dropout, input_size, input_feature_num, hidden_dim,
output_size):
"""
        Uses simple linear layers for multivariate single-step forecasting.
"""
super().__init__()
self.fc1 = nn.Linear(input_size * input_feature_num, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_dim, output_size)
def forward(self, x):
x = x.view(-1, x.shape[1] * x.shape[2])
x = self.fc1(x)
x = self.dropout(x)
x = self.relu1(x)
x = self.fc2(x)
x = torch.unsqueeze(x, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dropout': 0.5, 'input_size': 4, 'input_feature_num': 4,
'hidden_dim': 4, 'output_size': 4}]
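# A minimal shape walk-through (added for illustration, not from the repo):
# view(-1, 4 * 4) flattens (4, 4, 4, 4) into (16, 16), fc1/fc2 map
# 16 -> 4 -> 4, and unsqueeze(x, 1) yields (16, 1, 4).
if __name__ == '__main__':
    net = Model(dropout=0.5, input_size=4, input_feature_num=4,
        hidden_dim=4, output_size=4)
    out = net(torch.rand([4, 4, 4, 4]))
    assert out.shape == (16, 1, 4)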
|
Policy | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vh/cvhowampoosezwy5zm5vfkdmhzrvsn2u2gxpn4cchngk4b74ympu.py
# Topologically Sorted Source Nodes: [action_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# action_prob => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2, ), (1, ))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 8192, grid=grid(8192), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [action_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 128, grid=grid(128), stream=stream0)
del buf2
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [state_values], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (buf3, reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf3, primals_6, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Policy(nn.Module):
"""
    Implements both actor and critic in one model.
"""
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
def forward(self, x):
"""
        Forward pass of both actor and critic.
"""
x = F.relu(self.affine1(x))
action_prob = F.softmax(self.action_head(x), dim=-1)
state_values = self.value_head(x)
return action_prob, state_values
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
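# Reference semantics for the kernel above (a descriptive sketch, not part of
# the generated output): x1 = xindex // 2 selects a row of two action logits,
# and the body computes the numerically stable softmax
# exp(x - max(x)) / sum(exp(x - max(x))) over that pair, matching
# F.softmax(..., dim=-1) on the last dimension.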
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128),
0), alpha=1, beta=1, out=buf5)
del primals_7
return buf3, reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf3, primals_6, primals_4, buf6
class PolicyNew(nn.Module):
"""
    Implements both actor and critic in one model.
"""
def __init__(self):
super(PolicyNew, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.action_head.weight
primals_5 = self.action_head.bias
primals_6 = self.value_head.weight
primals_7 = self.value_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| caimingxue/Reinforcement-Learning | Policy | false | 6,376 | [
"MIT"
] | 1 | 5ccb8a6a25b41526f4d6195e69964245abc46d38 | https://github.com/caimingxue/Reinforcement-Learning/tree/5ccb8a6a25b41526f4d6195e69964245abc46d38 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
    Implements both actor and critic in one model.
"""
def __init__(self):
super().__init__()
self.affine1 = nn.Linear(4, 128)
self.action_head = nn.Linear(128, 2)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
def forward(self, x):
"""
        Forward pass of both actor and critic.
"""
x = F.relu(self.affine1(x))
action_prob = F.softmax(self.action_head(x), dim=-1)
state_values = self.value_head(x)
return action_prob, state_values
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
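# A minimal actor-critic step (added for illustration, not from the repo),
# sampling an action from the policy head with torch.distributions:
if __name__ == '__main__':
    from torch.distributions import Categorical
    policy = Model()
    probs, value = policy(torch.rand([1, 4]))
    dist = Categorical(probs)
    action = dist.sample()
    policy.saved_actions.append((dist.log_prob(action), value))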
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5d/c5dw65h6nafj4s44sgiahjrq6lb3zgwonovnkpx75jkkuxpl34xg.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 73728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
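    # The load below implements ReflectionPad2d(1) on each 4x4 plane: the
    # nested tl_math.abs terms fold out-of-range coordinates back into
    # [0, 3] (col = 3 - |3 - |x0 - 1||, row likewise), so the 6x6 output
    # reads reflected values from the 16-element input plane x2.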
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kk/ckkeehad7xvjmxduxmwzjdm4zu3f5inttmd6kj4pju55xhwrnoea.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_3 => add, add_1, convert_element_type, convert_element_type_1, iota_2, mul, mul_1
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_1 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
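    # Computes floor(i * 0.5) for i in [0, 8): the source-index map used by
    # the 2x nearest-neighbor upsample from a 4-wide axis to an 8-wide one.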
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/md/cmdntih7abyn6mwxuftxv3u7gvdavkba4iadd7mlu5mbghjqqv56.py
# Topologically Sorted Source Nodes: [x_1, x_2, x_3, x_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# x_3 => _unsafe_index_2
# x_4 => _unsafe_index_3, _unsafe_index_4
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_3, [None, None, None, %sub_5]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 102400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 10) % 10
x0 = xindex % 10
x4 = (xindex // 100)
x2 = (xindex // 100) % 256
x7 = xindex
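    # Fused epilogue: the reflected index loads pad the upsampled grid to
    # 10x10 (ReflectionPad2d(1) on an 8x8 map), the gathered index map picks
    # the 2x nearest-neighbor source pixel from the 4x4 conv output, and
    # relu(value + bias) is applied in the same pass.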
tmp0 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x4)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x7), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7v/c7v45oloqfn4euhmt66oczbasvb63gxwy7fm3svojah6q6q2th7g.py
# Topologically Sorted Source Nodes: [x_5, x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_5 => convolution_1
# x_6 => relu_1
# x_7 => _unsafe_index_5, _unsafe_index_6
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_4, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_5, [None, None, None, %sub_5]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 102400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = (xindex // 10) % 10
x4 = (xindex // 100)
x2 = (xindex // 100) % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0))))) + ((-8)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (64*x4)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x5), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7n/c7nyyvchtptp5rbqvks7cavg63gajcswx5tpkvypdmc7ocd2zaaz.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_16 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_12, mul_4, mul_5
# Graph fragment:
# %iota_12 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_12, 1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qf/cqflzwnnhiyl7hoawchcxkqxkkwckfdq3rpeoa3o4sk6wo5a6n6n.py
# Topologically Sorted Source Nodes: [x_14, x_15, x_16, x_17], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_14 => convolution_4
# x_15 => relu_4
# x_16 => _unsafe_index_11
# x_17 => _unsafe_index_12, _unsafe_index_13
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_11, [None, None, %sub_21, None]), kwargs = {})
# %_unsafe_index_13 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_12, [None, None, None, %sub_21]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 18) % 18
x0 = xindex % 18
x4 = (xindex // 324)
x2 = (xindex // 324) % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x4)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x7), tmp13, None)
''', device_str='cuda')
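# Sketch: an eager-mode reference for the fusion above (conv -> ReLU ->
# 2x nearest upsample -> ReflectionPad2d(1)); a hedged spot-check helper under
# the shapes asserted in call() below, not the compiled path itself.
def _conv_relu_upsample_pad_sketch(x, weight, bias):
    import torch.nn.functional as F
    y = F.relu(F.conv2d(x, weight, bias))
    y = F.interpolate(y, scale_factor=2, mode='nearest')
    return F.pad(y, (1, 1, 1, 1), mode='reflect')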
# kernel path: runs/run_shard_4/inductor_cache/i4/ci4u2uzwdl4x6xsvbrs62puexrtawx4kfqztnsboljgpqpev5zur.py
# Topologically Sorted Source Nodes: [x_18, x_19, x_20], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_18 => convolution_5
# x_19 => relu_5
# x_20 => _unsafe_index_14, _unsafe_index_15
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_13, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %_unsafe_index_14 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %sub_21, None]), kwargs = {})
# %_unsafe_index_15 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_14, [None, None, None, %sub_21]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_6 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = (xindex // 18) % 18
x4 = (xindex // 324)
x2 = (xindex // 324) % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x4)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x5), tmp4, None)
''', device_str='cuda')
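# Sketch: the load offset 255 + (-1)*abs(-15 + abs(-1 + x0)) + (-16)*abs(-15 +
# abs(-1 + x1)) + 256*x4 above is a closed-form ReflectionPad2d(1) gather over
# a 16x16 plane. A hedged 1-D check of the same formula, assuming pad=1:
def _reflect_index_sketch(x, w=16, pad=1):
    # output coordinate x in [0, w + 2*pad) maps to source index
    # (w - 1) - |(w - 1) - |x - pad||, matching the kernel's index arithmetic
    return (w - 1) - abs((w - 1) - abs(x - pad))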
# kernel path: runs/run_shard_4/inductor_cache/uw/cuwhadke7ul24n6hzb5sal45fu4ro7spo5s7tl5x4bo5jl7gz66f.py
# Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_23 => add_8, add_9, convert_element_type_8, convert_element_type_9, iota_18, mul_8, mul_9
# Graph fragment:
# %iota_18 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_18, 1), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 0), kwargs = {})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_8, torch.float32), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, 0.5), kwargs = {})
# %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_9, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_7 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
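# Sketch: the int64 buffer this kernel fills is consumed by the _unsafe_index
# kernels as a gather index. A hedged eager analogue of 2x nearest upsampling
# via advanced indexing, assuming a 4-D activation x:
def _upsample_by_gather_sketch(x):
    import torch
    ih = (torch.arange(2 * x.shape[-2], dtype=torch.float32) * 0.5).to(torch.int64)
    iw = (torch.arange(2 * x.shape[-1], dtype=torch.float32) * 0.5).to(torch.int64)
    return x[:, :, ih][:, :, :, iw]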
# kernel path: runs/run_shard_4/inductor_cache/k7/ck7bf3l3rzn2b5z27scfb6po6ttassqspu4urfkn5e7xbhsq2s2d.py
# Topologically Sorted Source Nodes: [x_21, x_22, x_23, x_24], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_21 => convolution_6
# x_22 => relu_6
# x_23 => _unsafe_index_16
# x_24 => _unsafe_index_17, _unsafe_index_18
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_15, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %_unsafe_index_16 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_6, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {})
# %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_16, [None, None, %sub_29, None]), kwargs = {})
# %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_17, [None, None, None, %sub_29]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 34) % 34
x0 = xindex % 34
x4 = (xindex // 1156)
x2 = (xindex // 1156) % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1)))))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0)))))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x4)), xmask, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x7), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yx/cyxiisvd6357e3nrt4znkmjivtcc45mzwcgpnvrj5jiwbalchths.py
# Topologically Sorted Source Nodes: [x_25, x_26, x_27], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# x_25 => convolution_7
# x_26 => relu_7
# x_27 => _unsafe_index_19, _unsafe_index_20
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_18, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_7, [None, None, %sub_29, None]), kwargs = {})
# %_unsafe_index_20 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_19, [None, None, None, %sub_29]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_9 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = (xindex // 34) % 34
x4 = (xindex // 1156)
x2 = (xindex // 1156) % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x5), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uj/cujb6y5y4osx2vvsp7mk2u3fjeqpsrecy4fu2epgp3awq5idnjyq.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_28 => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_20, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
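# Sketch: triton_poi_fused_convolution_10 only folds the conv bias into the
# output in place; a hedged eager equivalent, assuming out of shape
# (N, C, H, W) and a per-channel bias:
def _inplace_bias_add_sketch(out, bias):
    out += bias.view(1, -1, 1, 1)
    return out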
# kernel path: runs/run_shard_4/inductor_cache/eo/ceoyvrt7lilhj2azq2emt7bipqc444mts3q5pn2y2v3acs3msegf.py
# Topologically Sorted Source Nodes: [x_25, x_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_25 => convolution_7
# x_26 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_18, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %le_18 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
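# Sketch: this and the following *_threshold_backward_* kernels materialize
# the boolean mask autograd needs for ReLU, i.e. where the forward activation
# was <= 0. A hedged eager equivalent, assuming conv output y and per-channel
# bias b:
def _relu_backward_mask_sketch(y, b):
    import torch.nn.functional as F
    return F.relu(y + b.view(1, -1, 1, 1)) <= 0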
# kernel path: runs/run_shard_4/inductor_cache/dt/cdtdjcn5ao6jckbptzeeuah2yudb27um4uwlvwffbnwtbnhpytvf.py
# Topologically Sorted Source Nodes: [x_21, x_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_21 => convolution_6
# x_22 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_15, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %le_37 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_12 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ux/cuxkikwlw6hj4upwuoko3b6add2lr74ihu73rd7qw6rcofoytsns.py
# Topologically Sorted Source Nodes: [x_18, x_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_18 => convolution_5
# x_19 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_13, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le_56 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_13 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qk/cqkmpfvaqberctsqs5e4tsqughooqyxlitaje3pdowd5wtcne5tc.py
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_14 => convolution_4
# x_15 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %le_75 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ov/covedzte6ka54ajsekyitrd6p6mpp523wulxwtrl63ektefmpuve.py
# Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_11 => convolution_3
# x_12 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_8, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_94 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_15 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gy/cgybnsrpcjgnh4rqattwudtwjjam2me24yiscurttis3qbe4diay.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_151 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128, ), (1, ))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64, ), (1, ))
assert_size_stride(primals_18, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 73728, grid=grid(73728), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_1.run(buf2, 8, grid=grid(8), stream=stream0)
buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2, x_3, x_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2.run(buf2, buf1, primals_3, buf3, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1))
buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5, x_6, x_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf4, primals_5, buf5, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8, x_9, x_10], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf6, primals_7, buf7, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1))
buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11, x_12, x_13], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf8, primals_9, buf9, 102400, grid=grid(102400), stream=stream0)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1))
buf11 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_4.run(buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_14, x_15, x_16, x_17], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5.run(buf11, buf10, primals_11, buf12, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1))
buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_18, x_19, x_20], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_6.run(buf13, primals_13, buf14, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1))
buf16 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_7.run(buf16, 32, grid=grid(32), stream=stream0)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_21, x_22, x_23, x_24], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d]
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8.run(buf16, buf15, primals_15, buf17, 295936, grid=grid(295936), stream=stream0)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_25, x_26, x_27], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_9.run(buf18, primals_17, buf19, 295936, grid=grid(295936), stream=stream0)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf21, primals_19, 12288, grid=grid(12288), stream=stream0)
del primals_19
buf22 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_25, x_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_11.run(buf18, primals_17, buf22, 262144, grid=grid(262144), stream=stream0)
del buf18
del primals_17
buf23 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_21, x_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_12.run(buf15, primals_15, buf23, 65536, grid=grid(65536), stream=stream0)
del buf15
del primals_15
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_18, x_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_13.run(buf13, primals_13, buf24, 131072, grid=grid(131072), stream=stream0)
del buf13
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_14.run(buf10, primals_11, buf25, 32768, grid=grid(32768), stream=stream0)
del buf10
del primals_11
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_15.run(buf8, primals_9, buf26, 65536, grid=grid(65536), stream=stream0)
del buf8
del primals_9
buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_15.run(buf6, primals_7, buf27, 65536, grid=grid(65536), stream=stream0)
del buf6
del primals_7
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_15.run(buf4, primals_5, buf28, 65536, grid=grid(65536), stream=stream0)
del buf4
del primals_5
buf29 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_16.run(buf1, primals_3, buf29, 16384, grid=grid(16384), stream=stream0)
del buf1
del primals_3
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3, buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22, buf23, buf24, buf25, buf26, buf27, buf28, buf29, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((3, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
class RC(nn.Module):
"""
    A wrapper class combining ReflectionPad2d and Conv2d, followed by an optional ReLU.
"""
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1,
activation_function=True):
super().__init__()
self.pad = nn.ReflectionPad2d((padding, padding, padding, padding))
self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
self.activation_function = activation_function
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
return F.relu(x) if self.activation_function else x
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.rc1 = RC(512, 256, 3, 1)
self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc2 = RC(256, 256, 3, 1)
self.rc3 = RC(256, 256, 3, 1)
self.rc4 = RC(256, 256, 3, 1)
self.rc5 = RC(256, 128, 3, 1)
self.upsample2 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc6 = RC(128, 128, 3, 1)
self.rc7 = RC(128, 64, 3, 1)
self.upsample3 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc8 = RC(64, 64, 3, 1)
self.rc9 = RC(64, 3, 3, 1, False)
def forward(self, x):
x = self.rc1(x)
x = self.upsample1(x)
x = self.rc2(x)
x = self.rc3(x)
x = self.rc4(x)
x = self.rc5(x)
x = self.upsample2(x)
x = self.rc6(x)
x = self.rc7(x)
x = self.upsample3(x)
x = self.rc8(x)
x = self.rc9(x)
return x
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
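# Sketch: a minimal smoke test for the module above, assuming CPU eager
# execution; three 2x nearest upsamples take the 4x4 input to 32x32 and the
# final RC layer maps to 3 channels, matching the (4, 3, 32, 32) assert in the
# compiled wrapper.
def _decoder_smoke_test():
    decoder = Decoder()
    out = decoder(*get_inputs())
    assert out.shape == (4, 3, 32, 32)
    return out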
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 10 % 10
x0 = xindex % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x7 = xindex
tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1
))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0
))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 10
x1 = xindex // 10 % 10
x4 = xindex // 100
x2 = xindex // 100 % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 +
x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 18 % 18
x0 = xindex % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x4 = xindex // 324
x2 = xindex // 324 % 128
x5 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 34 % 34
x0 = xindex % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x7, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
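# Note (added): plain in-place bias add for the final 3-channel convolution; rc9
# is constructed with activation_function=False, so no ReLU is fused here.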
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
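# Note (added): kernels 11-16 differ only in channel counts and strides; each
# recomputes conv output + bias + ReLU and stores the boolean mask (activation
# <= 0) that aten.threshold_backward later uses to gate ReLU gradients.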
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (3, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0,
73728, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1))
buf2 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK
=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid
(102400)](buf2, buf1, primals_3, buf3, 102400, XBLOCK=512,
num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1))
buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf4
, primals_5, buf5, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1))
buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf6
, primals_7, buf7, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1))
buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf8
, primals_9, buf9, 102400, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1))
buf11 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid
(165888)](buf11, buf10, primals_11, buf12, 165888, XBLOCK=1024,
num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1))
buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(165888)](
buf13, primals_13, buf14, 165888, XBLOCK=512, num_warps=8,
num_stages=1)
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1))
buf16 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf16, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid
(295936)](buf16, buf15, primals_15, buf17, 295936, XBLOCK=1024,
num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(295936)](
buf18, primals_17, buf19, 295936, XBLOCK=512, num_warps=8,
num_stages=1)
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_10[grid(12288)](buf21, primals_19,
12288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf22 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(262144)](
buf18, primals_17, buf22, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf18
del primals_17
buf23 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_12[grid(65536)](
buf15, primals_15, buf23, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del buf15
del primals_15
buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_13[grid(131072)](
buf13, primals_13, buf24, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf13
del primals_13
buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_14[grid(32768)](
buf10, primals_11, buf25, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del buf10
del primals_11
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf8, primals_9, buf26, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf8
del primals_9
buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf6, primals_7, buf27, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf6
del primals_7
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)](
buf4, primals_5, buf28, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf4
del primals_5
buf29 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_16[grid(16384)](
buf1, primals_3, buf29, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del buf1
del primals_3
return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10,
primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3,
buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22,
buf23, buf24, buf25, buf26, buf27, buf28, buf29)
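# Note (added): `call` mirrors DecoderNew.forward: rc1 (512 -> 256) followed by a
# nearest 2x upsample (buf2 is its index table), rc2-rc4 at 256 channels, rc5
# (256 -> 128) plus upsample, rc6/rc7 (-> 64) plus upsample, rc8, and the
# bias-only rc9 producing buf21; the trailing bool buffers (buf22-buf29) are the
# saved ReLU masks for the backward pass.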
class RC(nn.Module):
"""
    A wrapper class for ReflectionPad2d, Conv2d, and an optional ReLU
"""
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1,
activation_function=True):
super().__init__()
self.pad = nn.ReflectionPad2d((padding, padding, padding, padding))
self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
self.activation_function = activation_function
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
return F.relu(x) if self.activation_function else x
class DecoderNew(nn.Module):
def __init__(self):
super(DecoderNew, self).__init__()
self.rc1 = RC(512, 256, 3, 1)
self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc2 = RC(256, 256, 3, 1)
self.rc3 = RC(256, 256, 3, 1)
self.rc4 = RC(256, 256, 3, 1)
self.rc5 = RC(256, 128, 3, 1)
self.upsample2 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc6 = RC(128, 128, 3, 1)
self.rc7 = RC(128, 64, 3, 1)
self.upsample3 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc8 = RC(64, 64, 3, 1)
self.rc9 = RC(64, 3, 3, 1, False)
def forward(self, input_0):
primals_2 = self.rc1.conv.weight
primals_3 = self.rc1.conv.bias
primals_4 = self.rc2.conv.weight
primals_5 = self.rc2.conv.bias
primals_6 = self.rc3.conv.weight
primals_7 = self.rc3.conv.bias
primals_8 = self.rc4.conv.weight
primals_9 = self.rc4.conv.bias
primals_10 = self.rc5.conv.weight
primals_11 = self.rc5.conv.bias
primals_12 = self.rc6.conv.weight
primals_13 = self.rc6.conv.bias
primals_14 = self.rc7.conv.weight
primals_15 = self.rc7.conv.bias
primals_16 = self.rc8.conv.weight
primals_17 = self.rc8.conv.bias
primals_18 = self.rc9.conv.weight
primals_19 = self.rc9.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
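# Usage sketch (hypothetical; `call` pins CUDA device 0, so a GPU is required):
# decoder = DecoderNew().cuda()
# feats = torch.rand(4, 512, 4, 4, device='cuda')  # encoder features, e.g. AdaIN output
# img = decoder(feats)  # -> (4, 3, 32, 32) after three nearest 2x upsamples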
| benningtonlee7/AdaIn_Style_Transfer_From_Scratch_In_Pytorch | Decoder | false | 6,377 | [
"MIT"
] | 1 | 50dfe4bdcbcdd0f4e647f9ee45de2a3f81eb6722 | https://github.com/benningtonlee7/AdaIn_Style_Transfer_From_Scratch_In_Pytorch/tree/50dfe4bdcbcdd0f4e647f9ee45de2a3f81eb6722 | import torch
import torch.nn as nn
import torch.nn.functional as F
class RC(nn.Module):
"""
    A wrapper class for ReflectionPad2d, Conv2d, and an optional ReLU
"""
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1,
activation_function=True):
super().__init__()
self.pad = nn.ReflectionPad2d((padding, padding, padding, padding))
self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
self.activation_function = activation_function
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
return F.relu(x) if self.activation_function else x
class Model(nn.Module):
def __init__(self):
super().__init__()
self.rc1 = RC(512, 256, 3, 1)
self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc2 = RC(256, 256, 3, 1)
self.rc3 = RC(256, 256, 3, 1)
self.rc4 = RC(256, 256, 3, 1)
self.rc5 = RC(256, 128, 3, 1)
self.upsample2 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc6 = RC(128, 128, 3, 1)
self.rc7 = RC(128, 64, 3, 1)
self.upsample3 = nn.Upsample(scale_factor=2, mode='nearest')
self.rc8 = RC(64, 64, 3, 1)
self.rc9 = RC(64, 3, 3, 1, False)
def forward(self, x):
x = self.rc1(x)
x = self.upsample1(x)
x = self.rc2(x)
x = self.rc3(x)
x = self.rc4(x)
x = self.rc5(x)
x = self.upsample2(x)
x = self.rc6(x)
x = self.rc7(x)
x = self.upsample3(x)
x = self.rc8(x)
x = self.rc9(x)
return x
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return []
|
DurationPredictorLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hw/chwuesadj2btsih4b62udzpqfogubf5ofjztuczxw2tuhmwsh3ei.py
# Topologically Sorted Source Nodes: [add, targets, loss], Original ATen: [aten.add, aten.log, aten.mse_loss]
# Source node to ATen node mapping:
# add => add
# loss => mean, pow_1, sub
# targets => log
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %log), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_add_log_mse_loss_0 = async_compile.triton('triton_per_fused_add_log_mse_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = tmp0 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, targets, loss], Original ATen: [aten.add, aten.log, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_add_log_mse_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class DurationPredictorLoss(torch.nn.Module):
"""Loss function module for duration predictor.
    The loss value is calculated in the log domain to make it Gaussian.
"""
def __init__(self, offset=1.0):
"""Initilize duration predictor loss module.
Args:
offset (float, optional): Offset value to avoid nan in log domain.
"""
super(DurationPredictorLoss, self).__init__()
self.criterion = torch.nn.MSELoss()
self.offset = offset
def forward(self, outputs, targets):
"""Calculate forward propagation.
Args:
outputs (Tensor): Batch of prediction durations in log domain (B, T)
            targets (LongTensor): Batch of ground-truth durations in linear domain (B, T)
Returns:
Tensor: Mean squared error loss value.
Note:
`outputs` is in log domain but `targets` is in linear domain.
"""
targets = torch.log(targets.float() + self.offset)
loss = self.criterion(outputs, targets)
return loss
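# Equivalent closed form (sketch, with the default offset = 1.0):
# loss = ((outputs - torch.log(targets.float() + 1.0)) ** 2).mean()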
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = tmp0 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
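# Note (added): one 256-lane persistent program fuses the whole loss: tmp5 holds
# in_ptr0 - log(in_ptr1 + 1), tmp9 sums its square over all 256 elements, and
# tmp11 divides by 256.0 to realize MSELoss's mean reduction.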
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class DurationPredictorLossNew(torch.nn.Module):
"""Loss function module for duration predictor.
    The loss value is calculated in the log domain to make it Gaussian.
"""
def __init__(self, offset=1.0):
"""Initilize duration predictor loss module.
Args:
offset (float, optional): Offset value to avoid nan in log domain.
"""
super(DurationPredictorLossNew, self).__init__()
self.criterion = torch.nn.MSELoss()
self.offset = offset
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
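# Usage sketch (hypothetical; `call` pins CUDA device 0):
# crit = DurationPredictorLossNew()
# loss = crit(torch.rand(4, 4, 4, 4, device='cuda'),
#             torch.rand(4, 4, 4, 4, device='cuda'))  # -> scalar tensor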
| carankt/FastSpeech2-1 | DurationPredictorLoss | false | 6,378 | [
"Apache-2.0"
] | 1 | 42c06e4fbdf741a0719154d1cb4617b7d3f15a5c | https://github.com/carankt/FastSpeech2-1/tree/42c06e4fbdf741a0719154d1cb4617b7d3f15a5c | import torch
class Model(torch.nn.Module):
"""Loss function module for duration predictor.
    The loss value is calculated in the log domain to make it Gaussian.
"""
def __init__(self, offset=1.0):
"""Initilize duration predictor loss module.
Args:
offset (float, optional): Offset value to avoid nan in log domain.
"""
super().__init__()
self.criterion = torch.nn.MSELoss()
self.offset = offset
def forward(self, outputs, targets):
"""Calculate forward propagation.
Args:
outputs (Tensor): Batch of prediction durations in log domain (B, T)
            targets (LongTensor): Batch of ground-truth durations in linear domain (B, T)
Returns:
Tensor: Mean squared error loss value.
Note:
`outputs` is in log domain but `targets` is in linear domain.
"""
targets = torch.log(targets.float() + self.offset)
loss = self.criterion(outputs, targets)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MessageNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tb/ctbmwtnsd5n3r63lnnyiuox5hmrewy5dfpy3utohrhqbsgxj3pbw.py
# Topologically Sorted Source Nodes: [msg, x_norm, mul, mul_1], Original ATen: [aten.div, aten.linalg_vector_norm, aten.mul]
# Source node to ATen node mapping:
# msg => div
# mul => mul
# mul_1 => mul_1
# x_norm => pow_3, pow_4, sum_2
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %pow_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg2_1), kwargs = {})
triton_poi_fused_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (0))
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp16
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp15 * tmp27
tmp31 = tmp28 * tmp30
tl.store(in_out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [msg, x_norm, mul, mul_1], Original ATen: [aten.div, aten.linalg_vector_norm, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_mul_0.run(buf1, arg0_1, arg1_1, arg2_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Parameter
import torch.fx
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
class MessageNorm(torch.nn.Module):
"""Applies message normalization over the aggregated messages as described
in the `"DeeperGCNs: All You Need to Train Deeper GCNs"
<https://arxiv.org/abs/2006.07739>`_ paper
.. math::
\\mathbf{x}_i^{\\prime} = \\mathrm{MLP} \\left( \\mathbf{x}_{i} + s \\cdot
{\\| \\mathbf{x}_i \\|}_2 \\cdot
\\frac{\\mathbf{m}_{i}}{{\\|\\mathbf{m}_i\\|}_2} \\right)
Args:
learn_scale (bool, optional): If set to :obj:`True`, will learn the
scaling factor :math:`s` of message normalization.
(default: :obj:`False`)
"""
def __init__(self, learn_scale: 'bool'=False):
super(MessageNorm, self).__init__()
self.scale = Parameter(torch.Tensor([1.0]), requires_grad=learn_scale)
def reset_parameters(self):
self.scale.data.fill_(1.0)
def forward(self, x: 'Tensor', msg: 'Tensor', p: 'int'=2):
""""""
msg = F.normalize(msg, p=p, dim=-1)
x_norm = x.norm(p=p, dim=-1, keepdim=True)
return msg * x_norm * self.scale
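    # Equivalent per-row form (sketch, with s = self.scale and p = 2):
    #   out_i = s * ||x_i||_2 * msg_i / max(||msg_i||_2, 1e-12)
    # (F.normalize clamps the denominator at its default eps = 1e-12.)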
def __repr__(self):
return '{}(learn_scale={})'.format(self.__class__.__name__, self.
scale.requires_grad)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Parameter
import torch.fx
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr2 + 0)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp16
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp15 * tmp27
tmp31 = tmp28 * tmp30
tl.store(in_out_ptr0 + x2, tmp31, xmask)
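# Note (added): each program rebuilds both L2 norms from the four trailing
# elements of its row: tmp12 (clamped at 1e-12 via tmp14) normalizes in_ptr0's
# row, tmp27 is the row norm of in_ptr1 used as the scale, and tmp30 broadcasts
# the scalar in_ptr2 -- fusing normalize, norm, and both multiplies in one pass.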
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_mul_0[grid(256)](buf1,
arg0_1, arg1_1, arg2_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf1,
class MessageNormNew(torch.nn.Module):
"""Applies message normalization over the aggregated messages as described
in the `"DeeperGCNs: All You Need to Train Deeper GCNs"
<https://arxiv.org/abs/2006.07739>`_ paper
.. math::
\\mathbf{x}_i^{\\prime} = \\mathrm{MLP} \\left( \\mathbf{x}_{i} + s \\cdot
{\\| \\mathbf{x}_i \\|}_2 \\cdot
\\frac{\\mathbf{m}_{i}}{{\\|\\mathbf{m}_i\\|}_2} \\right)
Args:
learn_scale (bool, optional): If set to :obj:`True`, will learn the
scaling factor :math:`s` of message normalization.
(default: :obj:`False`)
"""
def __init__(self, learn_scale: 'bool'=False):
super(MessageNormNew, self).__init__()
self.scale = Parameter(torch.Tensor([1.0]), requires_grad=learn_scale)
def reset_parameters(self):
self.scale.data.fill_(1.0)
def __repr__(self):
return '{}(learn_scale={})'.format(self.__class__.__name__, self.
scale.requires_grad)
def forward(self, input_0, input_1):
arg2_1 = self.scale
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| camus1337/pytorch_geometric | MessageNorm | false | 6,379 | [
"MIT"
] | 1 | 38514197a327541eb47abb69d4ab224910852605 | https://github.com/camus1337/pytorch_geometric/tree/38514197a327541eb47abb69d4ab224910852605 | import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Parameter
import torch.fx
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
class Model(torch.nn.Module):
"""Applies message normalization over the aggregated messages as described
in the `"DeeperGCNs: All You Need to Train Deeper GCNs"
<https://arxiv.org/abs/2006.07739>`_ paper
.. math::
\\mathbf{x}_i^{\\prime} = \\mathrm{MLP} \\left( \\mathbf{x}_{i} + s \\cdot
{\\| \\mathbf{x}_i \\|}_2 \\cdot
\\frac{\\mathbf{m}_{i}}{{\\|\\mathbf{m}_i\\|}_2} \\right)
Args:
learn_scale (bool, optional): If set to :obj:`True`, will learn the
scaling factor :math:`s` of message normalization.
(default: :obj:`False`)
"""
def __init__(self, learn_scale: 'bool'=False):
super().__init__()
self.scale = Parameter(torch.Tensor([1.0]), requires_grad=learn_scale)
def reset_parameters(self):
self.scale.data.fill_(1.0)
def forward(self, x: 'Tensor', msg: 'Tensor', p: 'int'=2):
""""""
msg = F.normalize(msg, p=p, dim=-1)
x_norm = x.norm(p=p, dim=-1, keepdim=True)
return msg * x_norm * self.scale
def __repr__(self):
return '{}(learn_scale={})'.format(self.__class__.__name__, self.
scale.requires_grad)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
PGNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yb/cybsjfmgf75kwyq3kyez46wzwjgjffwtsqe2uwa7bdzwlb6l22gt.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 20), (20, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 1280, grid=grid(1280), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 20), (20, 1), 0), reinterpret_tensor(primals_4, (20, 4), (1, 20), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 20), (20, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((20, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PGNetwork(nn.Module):
def __init__(self, state_dim, action_dim):
super(PGNetwork, self).__init__()
self.fc1 = nn.Linear(state_dim, 20)
self.fc2 = nn.Linear(20, action_dim)
def forward(self, x):
out = F.relu(self.fc1(x))
out = self.fc2(out)
return out
def initialize_weights(self):
        for m in self.modules():
            # self.modules() also yields the PGNetwork container itself, which
            # has no .weight/.bias, so only initialize the Linear layers.
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
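# Usage sketch (hypothetical shapes; nn.Linear acts on the trailing dimension):
# net = PGNetwork(state_dim=4, action_dim=4)
# net.initialize_weights()
# logits = net(torch.rand(4, 4, 4, 4))  # -> (4, 4, 4, 4) action scores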
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 20), (20, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1280)](buf1,
primals_2, buf3, 1280, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 20),
(20, 1), 0), reinterpret_tensor(primals_4, (20, 4), (1, 20), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 20), (20, 1), 0), primals_4, buf3
class PGNetworkNew(nn.Module):
def __init__(self, state_dim, action_dim):
super(PGNetworkNew, self).__init__()
self.fc1 = nn.Linear(state_dim, 20)
self.fc2 = nn.Linear(20, action_dim)
def initialize_weights(self):
        for m in self.modules():
            # self.modules() also yields the PGNetworkNew container itself, which
            # has no .weight/.bias, so only initialize the Linear layers.
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
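# Note (added): the compiled path lowers fc1 to an extern mm plus the fused
# bias/ReLU kernel above (which also saves the <= 0 mask for backward) and folds
# fc2's bias into a single addmm.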
| caimingxue/Reinforcement-Learning | PGNetwork | false | 6,380 | [
"MIT"
] | 1 | 5ccb8a6a25b41526f4d6195e69964245abc46d38 | https://github.com/caimingxue/Reinforcement-Learning/tree/5ccb8a6a25b41526f4d6195e69964245abc46d38 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim, 20)
self.fc2 = nn.Linear(20, action_dim)
def forward(self, x):
out = F.relu(self.fc1(x))
out = self.fc2(out)
return out
def initialize_weights(self):
        for m in self.modules():
            # self.modules() also yields the Model container itself, which has
            # no .weight/.bias, so only initialize the Linear layers.
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tc/ctcvjovxxyfp4dwf6u5wmvekfhdg5gp643zej54dczitpdhp42pe.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, clone, rsqrt, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/74/c74lpbw2rq4iu6ghsieimw7wwph5kzk2iiojo5f22stwavema7ym.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y2 = (yindex // 16)
y4 = yindex % 16
y5 = yindex
y0 = yindex % 4
y1 = (yindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (y4 + (16*x3) + (64*y2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y5), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y5), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x3 + (4*y1) + (16*y0) + (64*y2)), tmp8, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 64, 4, grid=grid(64, 4), stream=stream0)
del buf0
del buf1
del primals_2
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 1, 4, 16), 0), primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class LayerNorm(torch.nn.Module):
def __init__(self, nout: 'int'):
super(LayerNorm, self).__init__()
self.layer_norm = torch.nn.LayerNorm(nout, eps=1e-12)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self.layer_norm(x.transpose(1, -1))
x = x.transpose(1, -1)
return x
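# Illustrative usage (editorial addition, not in the original file): the
# transpose round-trip makes torch.nn.LayerNorm normalize over dim 1 of an
# (N, C, ...) input instead of the last dim, e.g.:
#     ln = LayerNorm(nout=4)
#     y = ln(torch.rand(4, 4, 4, 4))  # mean/var taken over the channel dim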
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nout': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
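    # Editorial comment: each of the 64 programs owns one (batch, spatial)
    # slot; the four strided loads below walk dim 1 (stride 16), so the mean
    # (out_ptr0) and rsqrt(var + 1e-12) (out_ptr1) of the normalized axis are
    # computed in registers for the second layer-norm kernel to consume.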
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y2 = yindex // 16
y4 = yindex % 16
y5 = yindex
y0 = yindex % 4
y1 = yindex // 4 % 4
tmp0 = tl.load(in_ptr0 + (y4 + 16 * x3 + 64 * y2), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y5, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y5, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x3 + 4 * y1 + 16 * y0 + 64 * y2), tmp8, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64, 4)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 64, 4, XBLOCK=4, YBLOCK=32,
num_warps=4, num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 1, 4, 16), 0), primals_1
class LayerNormNew(torch.nn.Module):
def __init__(self, nout: 'int'):
super(LayerNormNew, self).__init__()
self.layer_norm = torch.nn.LayerNorm(nout, eps=1e-12)
def forward(self, input_0):
primals_2 = self.layer_norm.weight
primals_3 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| carankt/FastSpeech2-1 | LayerNorm | false | 6,381 | [
"Apache-2.0"
] | 1 | 42c06e4fbdf741a0719154d1cb4617b7d3f15a5c | https://github.com/carankt/FastSpeech2-1/tree/42c06e4fbdf741a0719154d1cb4617b7d3f15a5c | import torch
class Model(torch.nn.Module):
def __init__(self, nout: 'int'):
super().__init__()
self.layer_norm = torch.nn.LayerNorm(nout, eps=1e-12)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self.layer_norm(x.transpose(1, -1))
x = x.transpose(1, -1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qq/cqq3voz2vrbd7rslz5ajg7li4jvrdw7rc3fc5hrieontbhfb3qmq.py
# Topologically Sorted Source Nodes: [mean, x, std, add, out, mul, out_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mul => mul
# out => div
# out_1 => add_1
# std => sqrt, var
# x => sub
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%primals_1,), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%sub,), kwargs = {correction: 0.0})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_per_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp25 = tl.load(in_ptr1 + (0))
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = tl.load(in_ptr2 + (0))
tmp29 = tl.broadcast_to(tmp28, [RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = tmp0 - tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = tl.broadcast_to(tmp7, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tl.full([1], 256, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp7 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = tmp19 / tmp4
tmp21 = libdevice.sqrt(tmp20)
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = tmp6 / tmp23
tmp27 = tmp24 * tmp26
tmp30 = tmp27 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp23, None)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp30, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf3; del buf3 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, x, std, add, out, mul, out_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0.run(buf1, buf5, primals_1, primals_2, primals_3, buf6, 1, 256, grid=grid(1), stream=stream0)
del primals_2
del primals_3
return (buf6, primals_1, buf1, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch.nn import Parameter
from torch.nn import LayerNorm
from typing import Optional
import torch.fx
from typing import Any
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
def maybe_num_nodes(edge_index, num_nodes=None):
if num_nodes is not None:
return num_nodes
elif isinstance(edge_index, Tensor):
return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0
else:
return max(edge_index.size(0), edge_index.size(1))
def constant(value: 'Any', fill_value: 'float'):
if isinstance(value, Tensor):
value.data.fill_(fill_value)
else:
for v in (value.parameters() if hasattr(value, 'parameters') else []):
constant(v, fill_value)
for v in (value.buffers() if hasattr(value, 'buffers') else []):
constant(v, fill_value)
def zeros(value: 'Any'):
constant(value, 0.0)
def ones(tensor: 'Any'):
constant(tensor, 1.0)
def degree(index, num_nodes: 'Optional[int]'=None, dtype:
        'Optional[torch.dtype]'=None):
"""Computes the (unweighted) degree of a given one-dimensional index
tensor.
Args:
index (LongTensor): Index tensor.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dtype (:obj:`torch.dtype`, optional): The desired data type of the
returned tensor.
:rtype: :class:`Tensor`
"""
N = maybe_num_nodes(index, num_nodes)
out = torch.zeros((N,), dtype=dtype, device=index.device)
one = torch.ones((index.size(0),), dtype=out.dtype, device=out.device)
return out.scatter_add_(0, index, one)
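# Worked example (editorial, not part of the original source):
#     degree(torch.tensor([0, 0, 1, 2]), num_nodes=4)
#     # -> tensor([2., 1., 1., 0.])  each slot counts occurrences of that index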
class LayerNorm(torch.nn.Module):
"""Applies layer normalization over each individual example in a batch
of node features as described in the `"Layer Normalization"
<https://arxiv.org/abs/1607.06450>`_ paper
.. math::
\\mathbf{x}^{\\prime}_i = \\frac{\\mathbf{x} -
\\textrm{E}[\\mathbf{x}]}{\\sqrt{\\textrm{Var}[\\mathbf{x}] + \\epsilon}}
\\odot \\gamma + \\beta
The mean and standard-deviation are calculated across all nodes and all
node channels separately for each object in a mini-batch.
Args:
in_channels (int): Size of each input sample.
eps (float, optional): A value added to the denominator for numerical
stability. (default: :obj:`1e-5`)
affine (bool, optional): If set to :obj:`True`, this module has
learnable affine parameters :math:`\\gamma` and :math:`\\beta`.
(default: :obj:`True`)
"""
def __init__(self, in_channels, eps=1e-05, affine=True):
super(LayerNorm, self).__init__()
self.in_channels = in_channels
self.eps = eps
if affine:
self.weight = Parameter(torch.Tensor([in_channels]))
self.bias = Parameter(torch.Tensor([in_channels]))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
ones(self.weight)
zeros(self.bias)
def forward(self, x: 'Tensor', batch: 'OptTensor'=None) ->Tensor:
""""""
if batch is None:
x = x - x.mean()
out = x / (x.std(unbiased=False) + self.eps)
else:
batch_size = int(batch.max()) + 1
norm = degree(batch, batch_size, dtype=x.dtype).clamp_(min=1)
norm = norm.mul_(x.size(-1)).view(-1, 1)
mean = scatter(x, batch, dim=0, dim_size=batch_size, reduce='add'
).sum(dim=-1, keepdim=True) / norm
x = x - mean[batch]
var = scatter(x * x, batch, dim=0, dim_size=batch_size, reduce=
'add').sum(dim=-1, keepdim=True)
var = var / norm
out = x / (var + self.eps).sqrt()[batch]
if self.weight is not None and self.bias is not None:
out = out * self.weight + self.bias
return out
def __repr__(self):
return f'{self.__class__.__name__}({self.in_channels})'
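# Editorial note: the batch-wise branch of forward() assumes a `scatter`
# helper (in upstream torch_geometric it comes from the library's scatter
# utilities) and the `OptTensor` alias (Optional[Tensor]); neither is defined
# in this snippet, so only the batch=None path runs as-is.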
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import Tensor
from torch.nn import Parameter
from typing import Optional
import torch.fx
from typing import Any
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
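    # Editorial comment: a single program reduces all 256 elements: tmp5 is
    # the global mean, tmp23 = std(unbiased=False) + 1e-5, and the final
    # store writes (x - mean) / (std + eps) * weight + bias, i.e. the
    # batch=None branch of LayerNorm.forward fused into one kernel.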
tmp0 = tl.load(in_ptr0 + r0, None)
tmp25 = tl.load(in_ptr1 + 0)
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = tl.load(in_ptr2 + 0)
tmp29 = tl.broadcast_to(tmp28, [RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = tmp0 - tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = tl.broadcast_to(tmp7, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = tl.full([1], 256, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp7 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = tmp19 / tmp4
tmp21 = libdevice.sqrt(tmp20)
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = tmp6 / tmp23
tmp27 = tmp24 * tmp26
tmp30 = tmp27 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp23, None)
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp30, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf3
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0[grid(1)](buf1, buf5,
primals_1, primals_2, primals_3, buf6, 1, 256, num_warps=2,
num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, buf1, buf5
def maybe_num_nodes(edge_index, num_nodes=None):
if num_nodes is not None:
return num_nodes
elif isinstance(edge_index, Tensor):
return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0
else:
return max(edge_index.size(0), edge_index.size(1))
def constant(value: 'Any', fill_value: 'float'):
if isinstance(value, Tensor):
value.data.fill_(fill_value)
else:
for v in (value.parameters() if hasattr(value, 'parameters') else []):
constant(v, fill_value)
for v in (value.buffers() if hasattr(value, 'buffers') else []):
constant(v, fill_value)
def zeros(value: 'Any'):
constant(value, 0.0)
def ones(tensor: 'Any'):
constant(tensor, 1.0)
def degree(index, num_nodes: 'Optional[int]'=None, dtype:
        'Optional[torch.dtype]'=None):
"""Computes the (unweighted) degree of a given one-dimensional index
tensor.
Args:
index (LongTensor): Index tensor.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dtype (:obj:`torch.dtype`, optional): The desired data type of the
returned tensor.
:rtype: :class:`Tensor`
"""
N = maybe_num_nodes(index, num_nodes)
out = torch.zeros((N,), dtype=dtype, device=index.device)
one = torch.ones((index.size(0),), dtype=out.dtype, device=out.device)
return out.scatter_add_(0, index, one)
class LayerNormNew(torch.nn.Module):
"""Applies layer normalization over each individual example in a batch
of node features as described in the `"Layer Normalization"
<https://arxiv.org/abs/1607.06450>`_ paper
.. math::
\\mathbf{x}^{\\prime}_i = \\frac{\\mathbf{x} -
\\textrm{E}[\\mathbf{x}]}{\\sqrt{\\textrm{Var}[\\mathbf{x}] + \\epsilon}}
\\odot \\gamma + \\beta
The mean and standard-deviation are calculated across all nodes and all
node channels separately for each object in a mini-batch.
Args:
in_channels (int): Size of each input sample.
eps (float, optional): A value added to the denominator for numerical
stability. (default: :obj:`1e-5`)
affine (bool, optional): If set to :obj:`True`, this module has
learnable affine parameters :math:`\\gamma` and :math:`\\beta`.
(default: :obj:`True`)
"""
def __init__(self, in_channels, eps=1e-05, affine=True):
super(LayerNormNew, self).__init__()
self.in_channels = in_channels
self.eps = eps
if affine:
self.weight = Parameter(torch.Tensor([in_channels]))
self.bias = Parameter(torch.Tensor([in_channels]))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
ones(self.weight)
zeros(self.bias)
def __repr__(self):
return f'{self.__class__.__name__}({self.in_channels})'
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| camus1337/pytorch_geometric | LayerNorm | false | 6,382 | [
"MIT"
] | 1 | 38514197a327541eb47abb69d4ab224910852605 | https://github.com/camus1337/pytorch_geometric/tree/38514197a327541eb47abb69d4ab224910852605 | import torch
from torch import Tensor
from torch.nn import Parameter
from torch.nn import LayerNorm
from typing import Optional
import torch.fx
from typing import Any
import torch.utils.data
from inspect import Parameter
from torch.nn.parameter import Parameter
def maybe_num_nodes(edge_index, num_nodes=None):
if num_nodes is not None:
return num_nodes
elif isinstance(edge_index, Tensor):
return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0
else:
return max(edge_index.size(0), edge_index.size(1))
def constant(value: 'Any', fill_value: 'float'):
if isinstance(value, Tensor):
value.data.fill_(fill_value)
else:
for v in (value.parameters() if hasattr(value, 'parameters') else []):
constant(v, fill_value)
for v in (value.buffers() if hasattr(value, 'buffers') else []):
constant(v, fill_value)
def zeros(value: 'Any'):
constant(value, 0.0)
def ones(tensor: 'Any'):
constant(tensor, 1.0)
def degree(index, num_nodes: 'Optional[int]'=None, dtype:
        'Optional[torch.dtype]'=None):
"""Computes the (unweighted) degree of a given one-dimensional index
tensor.
Args:
index (LongTensor): Index tensor.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dtype (:obj:`torch.dtype`, optional): The desired data type of the
returned tensor.
:rtype: :class:`Tensor`
"""
N = maybe_num_nodes(index, num_nodes)
out = torch.zeros((N,), dtype=dtype, device=index.device)
one = torch.ones((index.size(0),), dtype=out.dtype, device=out.device)
return out.scatter_add_(0, index, one)
class Model(torch.nn.Module):
"""Applies layer normalization over each individual example in a batch
of node features as described in the `"Layer Normalization"
<https://arxiv.org/abs/1607.06450>`_ paper
.. math::
\\mathbf{x}^{\\prime}_i = \\frac{\\mathbf{x} -
\\textrm{E}[\\mathbf{x}]}{\\sqrt{\\textrm{Var}[\\mathbf{x}] + \\epsilon}}
\\odot \\gamma + \\beta
The mean and standard-deviation are calculated across all nodes and all
node channels separately for each object in a mini-batch.
Args:
in_channels (int): Size of each input sample.
eps (float, optional): A value added to the denominator for numerical
stability. (default: :obj:`1e-5`)
affine (bool, optional): If set to :obj:`True`, this module has
learnable affine parameters :math:`\\gamma` and :math:`\\beta`.
(default: :obj:`True`)
"""
def __init__(self, in_channels, eps=1e-05, affine=True):
super().__init__()
self.in_channels = in_channels
self.eps = eps
if affine:
self.weight = Parameter(torch.Tensor([in_channels]))
self.bias = Parameter(torch.Tensor([in_channels]))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
ones(self.weight)
zeros(self.bias)
def forward(self, x: 'Tensor', batch: 'OptTensor'=None) ->Tensor:
""""""
if batch is None:
x = x - x.mean()
out = x / (x.std(unbiased=False) + self.eps)
else:
batch_size = int(batch.max()) + 1
norm = degree(batch, batch_size, dtype=x.dtype).clamp_(min=1)
norm = norm.mul_(x.size(-1)).view(-1, 1)
mean = scatter(x, batch, dim=0, dim_size=batch_size, reduce='add'
).sum(dim=-1, keepdim=True) / norm
x = x - mean[batch]
var = scatter(x * x, batch, dim=0, dim_size=batch_size, reduce=
'add').sum(dim=-1, keepdim=True)
var = var / norm
out = x / (var + self.eps).sqrt()[batch]
if self.weight is not None and self.bias is not Non
# ... truncated (>4000 chars) for memory efficiency |
MultiLayeredConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zt/cztfdbfdeuswkfmqcigzocsq5mos7eqthkdqr2u3uktw4kuq7d5w.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 3)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uj/cujexvhfquvwa2b5g6ftj7xjnepkxifmydmocky5zo4utu4xskqq.py
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 3)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 3), (12, 3, 1))
buf1 = reinterpret_tensor(buf0, (4, 3), (3, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 3), (3, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf4, 12, grid=grid(12), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 3), (0, 3, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 3), (12, 3, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 12, grid=grid(12), stream=stream0)
del primals_5
return (reinterpret_tensor(buf3, (4, 3), (3, 1), 0), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 3), (12, 3, 1), 0), buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class MultiLayeredConv1d(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace the position-wise feed-forward network
    in the Transformer block, introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans: 'int', hidden_chans: 'int', kernel_size:
'int', dropout_rate: 'float'):
super(MultiLayeredConv1d, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, 1, stride=1,
padding=(1 - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, *, in_chans).
Returns:
            Tensor: Batch of output tensors (B, *, in_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
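# Usage sketch (editorial addition): with an odd kernel_size the symmetric
# padding preserves the time axis, so shapes round-trip cleanly:
#     m = MultiLayeredConv1d(in_chans=4, hidden_chans=8, kernel_size=3,
#         dropout_rate=0.5)
#     y = m(torch.rand(2, 8, 4))  # -> (2, 8, 4)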
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4,
'dropout_rate': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 3
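    # Editorial comment: this fused kernel adds the conv bias, applies the
    # ReLU in place, and also stores the (activation <= 0) boolean mask that
    # threshold_backward reuses in the backward pass.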
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 3), (12, 3, 1))
buf1 = reinterpret_tensor(buf0, (4, 3), (3, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 3), (3, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(12)](buf1,
primals_3, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 3
), (0, 3, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 3), (12, 3, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(12)](buf3, primals_5, 12,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
return reinterpret_tensor(buf3, (4, 3), (3, 1), 0
), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4), (
16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 3), (12, 3, 1), 0), buf4
class MultiLayeredConv1dNew(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace the position-wise feed-forward network
    in the Transformer block, introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans: 'int', hidden_chans: 'int', kernel_size:
'int', dropout_rate: 'float'):
super(MultiLayeredConv1dNew, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, 1, stride=1,
padding=(1 - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_2 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| carankt/FastSpeech2-1 | MultiLayeredConv1d | false | 6,383 | [
"Apache-2.0"
] | 1 | 42c06e4fbdf741a0719154d1cb4617b7d3f15a5c | https://github.com/carankt/FastSpeech2-1/tree/42c06e4fbdf741a0719154d1cb4617b7d3f15a5c | import torch
class Model(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace the position-wise feed-forward network
    in the Transformer block, introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans: 'int', hidden_chans: 'int', kernel_size:
'int', dropout_rate: 'float'):
super().__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, 1, stride=1,
padding=(1 - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, *, in_chans).
Returns:
            Tensor: Batch of output tensors (B, *, in_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4,
'dropout_rate': 0.5}]
|
DepthConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sv/csvlv6wvjrp53f4ahzh3lmzvhxwlged7xkff5xqrl6jfc64skj6o.py
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.native_group_norm]
# Source node to ATen node mapping:
# conv2d => convolution
# output => add, add_1, mul_2, rsqrt, var_mean
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-08), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_2), kwargs = {})
triton_per_fused_convolution_native_group_norm_0 = async_compile.triton('triton_per_fused_convolution_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_native_group_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex
r2 = (rindex // 16)
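    # Editorial comment: per group-norm group this kernel fuses the conv bias
    # add, a PReLU (tmp8 selects x or weight*x on the sign of x), the
    # mean/variance reduction over the 64 elements of the group, and the
    # affine normalization written to out_ptr2.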
tmp0 = tl.load(in_out_ptr0 + (r3 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp32 = tl.load(in_ptr2 + (r2), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + (r2), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp15 / tmp17
tmp19 = tmp9 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp25 = tmp8 - tmp18
tmp26 = 64.0
tmp27 = tmp24 / tmp26
tmp28 = 1e-08
tmp29 = tmp27 + tmp28
tmp30 = libdevice.rsqrt(tmp29)
tmp31 = tmp25 * tmp30
tmp33 = tmp31 * tmp32
tmp35 = tmp33 + tmp34
tl.store(in_out_ptr0 + (r3 + (64*x0)), tmp2, xmask)
tl.store(out_ptr2 + (r3 + (64*x0)), tmp35, xmask)
tl.store(out_ptr3 + (x0), tmp30, xmask)
tl.store(out_ptr0 + (x0), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wc/cwclm5fv4xv4euwdcy5hcf4sovrzgd4oxluv3hdvlhung236wxjs.py
# Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.native_group_norm]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# output_1 => add_2, add_3, mul_5, rsqrt_1, var_mean_1
# Graph fragment:
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %primals_7, %primals_8, [1, 1], [4, 4], [1, 1], False, [0, 0], 4), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-08), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_11), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %unsqueeze_8), kwargs = {})
triton_per_fused_convolution_native_group_norm_1 = async_compile.triton('triton_per_fused_convolution_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_native_group_norm_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 4
XBLOCK: tl.constexpr = 1
rnumel = 324
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = rindex < rnumel
r3 = rindex
x0 = xindex
r2 = (rindex // 81)
tmp0 = tl.load(in_out_ptr0 + (r3 + (324*x0)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp32 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_last', other=0.0)
tmp34 = tl.load(in_ptr3 + (r2), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = tl.where(rmask, tmp9, 0)
tmp12 = tl.broadcast_to(tmp9, [RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp16 = tl.full([1], 324, tl.int32)
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp15 / tmp17
tmp19 = tmp9 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = tl.where(rmask, tmp21, 0)
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp25 = tmp8 - tmp18
tmp26 = 324.0
tmp27 = tmp24 / tmp26
tmp28 = 1e-08
tmp29 = tmp27 + tmp28
tmp30 = libdevice.rsqrt(tmp29)
tmp31 = tmp25 * tmp30
tmp33 = tmp31 * tmp32
tmp35 = tmp33 + tmp34
tl.store(in_out_ptr0 + (r3 + (324*x0)), tmp2, rmask)
tl.store(out_ptr2 + (r3 + (324*x0)), tmp35, rmask)
tl.store(out_ptr3 + (x0), tmp30, None)
tl.store(out_ptr0 + (x0), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2y/c2yaievmhemktd5uxyiqpcqdksjgyyhguyp576kgpjfy2jk6tilk.py
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# residual => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_3, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.native_group_norm]
stream0 = get_raw_stream(0)
triton_per_fused_convolution_native_group_norm_0.run(buf1, primals_2, primals_4, primals_5, primals_6, buf2, buf5, buf6, 4, 64, grid=grid(4), stream=stream0)
del primals_2
del primals_6
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf7, (4, 4, 9, 9), (324, 81, 9, 1))
buf8 = buf7; del buf7 # reuse
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf12 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
buf13 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, output_1], Original ATen: [aten.convolution, aten.native_group_norm]
triton_per_fused_convolution_native_group_norm_1.run(buf8, primals_8, primals_9, primals_10, primals_11, buf9, buf12, buf13, 4, 324, grid=grid(4), stream=stream0)
del primals_11
del primals_8
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 9, 9), (324, 81, 9, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf15, primals_13, 1296, grid=grid(1296), stream=stream0)
del primals_13
return (buf15, primals_1, primals_3, primals_4, primals_5, primals_7, primals_9, primals_10, primals_12, buf1, buf5, reinterpret_tensor(buf2, (4, 1), (1, 1), 0), reinterpret_tensor(buf6, (4, 1), (1, 1), 0), buf8, buf12, reinterpret_tensor(buf9, (4, 1), (1, 1), 0), reinterpret_tensor(buf13, (4, 1), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DepthConv2d(nn.Module):
def __init__(self, input_channel, hidden_channel, kernel, padding,
dilation=1):
super(DepthConv2d, self).__init__()
self.conv2d = nn.Conv2d(input_channel, hidden_channel, 1)
self.padding = padding
self.dconv2d = nn.Conv2d(hidden_channel, hidden_channel, kernel,
dilation=dilation, groups=hidden_channel, padding=self.padding)
self.res_out = nn.Conv2d(hidden_channel, input_channel, 1)
self.nonlinearity1 = nn.PReLU()
self.nonlinearity2 = nn.PReLU()
self.reg1 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
self.reg2 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
def forward(self, input):
output = self.reg1(self.nonlinearity1(self.conv2d(input)))
output = self.reg2(self.nonlinearity2(self.dconv2d(output)))
residual = self.res_out(output)
return residual
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channel': 4, 'hidden_channel': 4, 'kernel': 4,
'padding': 4}]
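# Editorial sketch (assumption, not part of the repo file; the helper name is
# hypothetical): the (4, 4, 9, 9) shapes asserted in the compiled call() follow
# from the standard Conv2d output-size rule applied to dconv2d.
def _conv2d_out_size(in_size, kernel, padding, dilation=1, stride=1):
    return (in_size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

# 4x4 input, kernel=4, padding=4, dilation=1, stride=1 -> 9x9 output
assert _conv2d_out_size(4, kernel=4, padding=4) == 9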
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x0 = xindex
r2 = rindex // 16
tmp0 = tl.load(in_out_ptr0 + (r3 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp32 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + r2, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tl.where(xmask, tmp9, 0)
tmp12 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp15 / tmp17
tmp19 = tmp9 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp25 = tmp8 - tmp18
tmp26 = 64.0
tmp27 = tmp24 / tmp26
tmp28 = 1e-08
tmp29 = tmp27 + tmp28
tmp30 = libdevice.rsqrt(tmp29)
tmp31 = tmp25 * tmp30
tmp33 = tmp31 * tmp32
tmp35 = tmp33 + tmp34
tl.store(in_out_ptr0 + (r3 + 64 * x0), tmp2, xmask)
tl.store(out_ptr2 + (r3 + 64 * x0), tmp35, xmask)
tl.store(out_ptr3 + x0, tmp30, xmask)
tl.store(out_ptr0 + x0, tmp18, xmask)
@triton.jit
def triton_per_fused_convolution_native_group_norm_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
rnumel = 324
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
rmask = rindex < rnumel
r3 = rindex
x0 = xindex
r2 = rindex // 81
tmp0 = tl.load(in_out_ptr0 + (r3 + 324 * x0), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last', other=0.0
)
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp32 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp34 = tl.load(in_ptr3 + r2, rmask, eviction_policy='evict_last',
other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tl.where(rmask, tmp9, 0)
tmp12 = tl.broadcast_to(tmp9, [RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp16 = tl.full([1], 324, tl.int32)
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp15 / tmp17
tmp19 = tmp9 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = tl.where(rmask, tmp21, 0)
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
tmp25 = tmp8 - tmp18
tmp26 = 324.0
tmp27 = tmp24 / tmp26
tmp28 = 1e-08
tmp29 = tmp27 + tmp28
tmp30 = libdevice.rsqrt(tmp29)
tmp31 = tmp25 * tmp30
tmp33 = tmp31 * tmp32
tmp35 = tmp33 + tmp34
tl.store(in_out_ptr0 + (r3 + 324 * x0), tmp2, rmask)
tl.store(out_ptr2 + (r3 + 324 * x0), tmp35, rmask)
tl.store(out_ptr3 + x0, tmp30, None)
tl.store(out_ptr0 + x0, tmp18, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_convolution_native_group_norm_0[grid(4)](buf1,
primals_2, primals_4, primals_5, primals_6, buf2, buf5, buf6, 4,
64, XBLOCK=1, num_warps=2, num_stages=1)
del primals_2
del primals_6
buf7 = extern_kernels.convolution(buf5, primals_7, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf7, (4, 4, 9, 9), (324, 81, 9, 1))
buf8 = buf7
del buf7
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf12 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32
)
buf13 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
triton_per_fused_convolution_native_group_norm_1[grid(4)](buf8,
primals_8, primals_9, primals_10, primals_11, buf9, buf12,
buf13, 4, 324, num_warps=4, num_stages=1)
del primals_11
del primals_8
buf14 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 9, 9), (324, 81, 9, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_2[grid(1296)](buf15, primals_13, 1296,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
return (buf15, primals_1, primals_3, primals_4, primals_5, primals_7,
primals_9, primals_10, primals_12, buf1, buf5, reinterpret_tensor(
buf2, (4, 1), (1, 1), 0), reinterpret_tensor(buf6, (4, 1), (1, 1),
0), buf8, buf12, reinterpret_tensor(buf9, (4, 1), (1, 1), 0),
reinterpret_tensor(buf13, (4, 1), (1, 1), 0))
class DepthConv2dNew(nn.Module):
def __init__(self, input_channel, hidden_channel, kernel, padding,
dilation=1):
super(DepthConv2dNew, self).__init__()
self.conv2d = nn.Conv2d(input_channel, hidden_channel, 1)
self.padding = padding
self.dconv2d = nn.Conv2d(hidden_channel, hidden_channel, kernel,
dilation=dilation, groups=hidden_channel, padding=self.padding)
self.res_out = nn.Conv2d(hidden_channel, input_channel, 1)
self.nonlinearity1 = nn.PReLU()
self.nonlinearity2 = nn.PReLU()
self.reg1 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
self.reg2 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
def forward(self, input_0):
        # parameters are ordered as the traced graph consumes them, which is the
        # order call() and the shape asserts above expect (e.g. primals_4 and
        # primals_9 must be the (1,)-shaped PReLU weights)
        primals_1 = self.conv2d.weight
        primals_2 = self.conv2d.bias
        primals_4 = self.nonlinearity1.weight
        primals_5 = self.reg1.weight
        primals_6 = self.reg1.bias
        primals_7 = self.dconv2d.weight
        primals_8 = self.dconv2d.bias
        primals_9 = self.nonlinearity2.weight
        primals_10 = self.reg2.weight
        primals_11 = self.reg2.bias
        primals_12 = self.res_out.weight
        primals_13 = self.res_out.bias
        primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
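# Editorial sketch (assumptions: a CUDA device is available and the helper names
# are hypothetical; this check is not part of the generated file). The compiled
# wrapper should agree with an eager restatement of DepthConv2d.forward built
# from the wrapper's own submodules, up to floating-point reduction order.
def _reference_forward(m, x):
    out = m.reg1(m.nonlinearity1(m.conv2d(x)))
    out = m.reg2(m.nonlinearity2(m.dconv2d(out)))
    return m.res_out(out)

def _check_equivalence():
    torch.manual_seed(0)
    m = DepthConv2dNew(4, 4, 4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(_reference_forward(m, x), m(x), atol=1e-5)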
| c-ma13/sepTFNet | DepthConv2d | false | 6,384 | ["MIT"] | 1 | a06c89c080f9449ac2e5090f80d9645deea7f23a | https://github.com/c-ma13/sepTFNet/tree/a06c89c080f9449ac2e5090f80d9645deea7f23a | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_channel, hidden_channel, kernel, padding,
dilation=1):
super().__init__()
self.conv2d = nn.Conv2d(input_channel, hidden_channel, 1)
self.padding = padding
self.dconv2d = nn.Conv2d(hidden_channel, hidden_channel, kernel,
dilation=dilation, groups=hidden_channel, padding=self.padding)
self.res_out = nn.Conv2d(hidden_channel, input_channel, 1)
self.nonlinearity1 = nn.PReLU()
self.nonlinearity2 = nn.PReLU()
self.reg1 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
self.reg2 = nn.GroupNorm(1, hidden_channel, eps=1e-08)
def forward(self, input):
output = self.reg1(self.nonlinearity1(self.conv2d(input)))
output = self.reg2(self.nonlinearity2(self.dconv2d(output)))
residual = self.res_out(output)
return residual
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channel': 4, 'hidden_channel': 4, 'kernel': 4,
'padding': 4}]
|
SequenceQuantizerSoftEMA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xd/cxdwtygncgwjegadoo262dgmykjc5ooocufkdnt5on6exr5afkek.py
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qo/cqoswgz4li7la7mfk5hqogs4cs5ts35qdgmou6txmh556cg4ds66.py
# Topologically Sorted Source Nodes: [norm_C], Original ATen: [aten.div]
# Source node to ATen node mapping:
# norm_C => div
# Graph fragment:
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %unsqueeze), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2f/c2f7kqnsuiwcx72eoajz3ea3f3cb2snaodha6q5fh6fzzqf7grnq.py
# Topologically Sorted Source Nodes: [flat_input_1], Original ATen: [aten.div]
# Source node to ATen node mapping:
# flat_input_1 => div_1
# Graph fragment:
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %unsqueeze_1), kwargs = {})
triton_poi_fused_div_2 = async_compile.triton('triton_poi_fused_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/s4/cs472yivvl3yzse325afzknsz7ua5dqrqzmwls3lwujk3hte6xkl.py
# Topologically Sorted Source Nodes: [distances], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# distances => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm, %amax), kwargs = {})
# %exp : [num_users=3] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ac/cacx5coai5q2zpubloixhcevya4zg46hkb7obv6uj4bxkhfvxiho.py
# Topologically Sorted Source Nodes: [distances, l1_loss], Original ATen: [aten._softmax, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# distances => sum_3
# l1_loss => abs_1, div_2, mean
# Graph fragment:
# %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_2 : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_3), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%div_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
# %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%sum_3, 0), kwargs = {})
# %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %where_self), kwargs = {})
triton_red_fused__softmax_abs_mean_sub_4 = async_compile.triton('triton_red_fused__softmax_abs_mean_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__softmax_abs_mean_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__softmax_abs_mean_sub_4(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp17 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.0
tmp10 = tmp7 >= tmp9
tmp11 = 1.0
tmp12 = -1.0
tmp13 = tl.where(tmp10, tmp11, tmp12)
tmp14 = tmp0 * tmp13
tmp15 = tl_math.abs(tmp8)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = _tmp17 + tmp16
_tmp17 = tl.where(rmask, tmp18, _tmp17)
tl.store(out_ptr0 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp8, rmask)
tl.store(out_ptr1 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp14, rmask)
tmp17 = tl.sum(_tmp17, 1)[:, None]
tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
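# Editorial note (hedged): the ge_scalar / where_self / neg_default pattern in
# the graph fragment above is Inductor's sign-stabilised rewrite of the later
# division: the numerator is multiplied by sign(sum_3) and the denominator by
# the same sign, so the value fed to log_softmax never divides by a negative
# number. A sum of exponentials is strictly positive, so sign(sum_3) == 1 here
# and the rewrite is numerically a no-op.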
# kernel path: runs/run_shard_4/inductor_cache/wn/cwn374apgni43a3ppr6qe77nkbllfr3vqklgehdirzvyr7ktjzar.py
# Topologically Sorted Source Nodes: [distances], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# distances => sum_3
# Graph fragment:
# %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%sum_3, 0), kwargs = {})
# %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %sum_3), kwargs = {})
# %div_tensor : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = 0.0
tmp17 = tmp15 >= tmp16
tmp18 = 1.0
tmp19 = -1.0
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp20 * tmp15
tmp22 = tmp8 / tmp21
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mk/cmkekxfod2trlmvhmtox3g24h7glojkyoniaxupzwfyl3ui5ycnt.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp_1, log, sub_3, sum_4
# Graph fragment:
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor, %log), kwargs = {})
triton_poi_fused__log_softmax_6 = async_compile.triton('triton_poi_fused__log_softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nb/cnbfz7yqwqktl3y6lmkuzeasipeph4twqdqxat3vvukltbfls6aj.py
# Topologically Sorted Source Nodes: [mul_1, sum_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul_1 => mul_1
# sum_1 => sum_5
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_2, -1), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
triton_per_fused_mul_sum_7 = async_compile.triton('triton_per_fused_mul_sum_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_7(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ae/caerw6od3xnz44vwrdmvyzjjr6suu76zt7s2nityfycvn2pepo3n.py
# Topologically Sorted Source Nodes: [l1_loss, mul, mean, mul_2, loss], Original ATen: [aten.abs, aten.mean, aten.mul, aten.add]
# Source node to ATen node mapping:
# l1_loss => abs_1, mean
# loss => add
# mean => mean_1
# mul => mul
# mul_2 => mul_2
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%div_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1000), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_5,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 5e-05), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
triton_per_fused_abs_add_mean_mul_8 = async_compile.triton('triton_per_fused_abs_add_mean_mul_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mean_mul_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_mean_mul_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = 256.0
tmp7 = tmp5 / tmp6
tmp8 = 1000.0
tmp9 = tmp7 * tmp8
tmp10 = 64.0
tmp11 = tmp3 / tmp10
tmp12 = 5e-05
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_linalg_vector_norm_0.run(primals_2, buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_C], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_2, buf0, buf1, 16, grid=grid(16), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [flat_input_1], Original ATen: [aten.div]
triton_poi_fused_div_2.run(primals_1, buf2, 256, grid=grid(256), stream=stream0)
del primals_1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf2, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [distances], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = buf3; del buf3 # reuse
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [distances, l1_loss], Original ATen: [aten._softmax, aten.sub, aten.abs, aten.mean]
triton_red_fused__softmax_abs_mean_sub_4.run(buf4, buf5, buf8, buf7, 1, 256, grid=grid(1), stream=stream0)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(buf5, buf1, out=buf6)
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [distances], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf8, buf4, buf9, 256, grid=grid(256), stream=stream0)
del buf4
buf10 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_6.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
del buf9
buf11 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(buf10, reinterpret_tensor(buf5, (4, 64), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, sum_1], Original ATen: [aten.mul, aten.sum]
triton_per_fused_mul_sum_7.run(buf11, buf12, 64, 64, grid=grid(64), stream=stream0)
del buf11
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [l1_loss, mul, mean, mul_2, loss], Original ATen: [aten.abs, aten.mean, aten.mul, aten.add]
triton_per_fused_abs_add_mean_mul_8.run(buf14, buf12, buf7, 1, 64, grid=grid(1), stream=stream0)
del buf12
del buf7
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf14, reinterpret_tensor(buf0, (4, 1), (1, 1), 0), buf1, buf2, buf5, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
class SequenceQuantizerSoftEMA(nn.Module):
def __init__(self, codebook_size, d_model, l1_cost=1000, entropy_cost=
5e-05, num_samples=10, temp=1.0, epsilon=1e-05, padding_idx=None):
super(SequenceQuantizerSoftEMA, self).__init__()
self.d_model = d_model
self.codebook_size = codebook_size
self.padding_idx = padding_idx
self.codebook = nn.Parameter(torch.FloatTensor(self.codebook_size,
self.d_model), requires_grad=True)
torch.nn.init.xavier_uniform_(self.codebook)
self.l1_cost = l1_cost
self.entropy_cost = entropy_cost
self.num_samples = num_samples
self.temp = temp
self._epsilon = epsilon
def entropy(self, tensor):
return torch.mean(torch.sum(-1 * torch.matmul(F.log_softmax(tensor,
dim=1), tensor.t()), dim=1))
def forward(self, inputs, l1_cost=None, entropy_cost=None, temp=None):
if l1_cost is None:
l1_cost = self.l1_cost
if entropy_cost is None:
entropy_cost = self.entropy_cost
if temp is None:
temp = self.temp
input_shape = inputs.size()
flat_input = inputs.reshape(-1, self.d_model)
norm_C = self.codebook / self.codebook.norm(2, dim=1)[:, None]
flat_input = flat_input / flat_input.norm(2, dim=1)[:, None]
distances = F.softmax(torch.matmul(flat_input, norm_C.t()), dim=1)
reconstruction = torch.matmul(distances, norm_C).view(input_shape)
l1_loss = nn.L1Loss()
loss = l1_cost * l1_loss(distances, torch.zeros_like(distances)
) + entropy_cost * self.entropy(distances)
return reconstruction, loss
    def cluster(self, inputs):
        input_shape = inputs.size()
        flat_input = inputs.reshape(-1, self.d_model)
        flat_input = flat_input / flat_input.norm(2, dim=1)[:, None]
        codebook = self.codebook / self.codebook.norm(2, dim=1)[:, None]
        # self.output_nheads is never assigned in __init__, so cluster() assumes
        # it has been set on the instance before this method is called
        distances = F.softmax(torch.matmul(flat_input, codebook.t()).
            reshape(-1, self.output_nheads, codebook.shape[0]), dim=2)
        reconstruction = torch.matmul(distances, codebook).view(input_shape)
        # argmax over the codebook axis gives the hard code index per head
        encoding_indices = torch.argmax(distances, dim=2).reshape(-1, self.
            output_nheads)
        return reconstruction, encoding_indices, distances
    def set_codebook(self, new_codebook):
        # copy under no_grad: an in-place op on a leaf Parameter that requires
        # grad would otherwise raise at runtime
        with torch.no_grad():
            self.codebook.copy_(new_codebook)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'codebook_size': 4, 'd_model': 4}]
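# Editorial sketch (assumption, not part of the repo file; the helper name is
# hypothetical): the core of forward() above, isolated. Each row of w is a
# softmax over cosine similarities, so every reconstruction is a convex
# combination of unit-norm codebook rows.
def _soft_quantize_reference(x, codebook):
    x = x / x.norm(2, dim=1, keepdim=True)
    c = codebook / codebook.norm(2, dim=1, keepdim=True)
    w = F.softmax(x @ c.t(), dim=1)  # soft assignment over codebook entries
    return w @ c, w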
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_red_fused__softmax_abs_mean_sub_4(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp17 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.0
tmp10 = tmp7 >= tmp9
tmp11 = 1.0
tmp12 = -1.0
tmp13 = tl.where(tmp10, tmp11, tmp12)
tmp14 = tmp0 * tmp13
tmp15 = tl_math.abs(tmp8)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = _tmp17 + tmp16
_tmp17 = tl.where(rmask, tmp18, _tmp17)
tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp8, rmask)
tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp14, rmask
)
tmp17 = tl.sum(_tmp17, 1)[:, None]
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = 0.0
tmp17 = tmp15 >= tmp16
tmp18 = 1.0
tmp19 = -1.0
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp20 * tmp15
tmp22 = tmp8 / tmp21
tl.store(out_ptr0 + x2, tmp22, xmask)
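# triton_poi_fused__softmax_5 recomputes the max-shifted distances for the
# log-softmax: (e - max(e)) / |sum(e)| == p - max(p) per row, where e are
# the sign-adjusted numerators (in_ptr0) and sum(e) is rebuilt from the raw
# numerators (in_ptr1).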
@triton.jit
def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
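# triton_poi_fused__log_softmax_6 applies the log-sum-exp step: for the
# already max-shifted input x it stores x - log(sum(exp(x))) per row,
# completing F.log_softmax(distances, dim=1).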
@triton.jit
def triton_per_fused_mul_sum_7(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # unused: the r-mask is trivially true (RBLOCK == rnumel)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
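# triton_per_fused_mul_sum_7 reduces each row of the (64, 64) product
# log_softmax(p) @ p.t() to sum(-1 * row), the inner sum of the entropy()
# term before its final mean.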
@triton.jit
def triton_per_fused_abs_add_mean_mul_8(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # unused: grid(1), single program
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # unused: x-mask trivially true
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # unused: r-mask trivially true
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = 256.0
tmp7 = tmp5 / tmp6
tmp8 = 1000.0
tmp9 = tmp7 * tmp8
tmp10 = 64.0
tmp11 = tmp3 / tmp10
tmp12 = 5e-05
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None)
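# triton_per_fused_abs_add_mean_mul_8 assembles the scalar loss:
# l1_cost * mean(|p|) + entropy_cost * mean(row entropies), with the default
# costs (1000.0, 5e-05) and the element counts (256, 64) folded in as
# compile-time constants.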
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_linalg_vector_norm_0[grid(4)](primals_2, buf0, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf0, buf1, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_div_2[grid(256)](primals_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf3)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = buf3
del buf3
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((), (), torch.float32)
triton_red_fused__softmax_abs_mean_sub_4[grid(1)](buf4, buf5, buf8,
buf7, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, buf1, out=buf6)
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_5[grid(256)](buf8, buf4, buf9, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf4
buf10 = buf8
del buf8
triton_poi_fused__log_softmax_6[grid(256)](buf9, buf10, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(buf5, (4, 64), (1, 4),
0), out=buf11)
buf12 = empty_strided_cuda((64,), (1,), torch.float32)
triton_per_fused_mul_sum_7[grid(64)](buf11, buf12, 64, 64, XBLOCK=8,
num_warps=4, num_stages=1)
del buf11
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = buf13
del buf13
triton_per_fused_abs_add_mean_mul_8[grid(1)](buf14, buf12, buf7, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del buf12
del buf7
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf14, reinterpret_tensor(buf0, (4, 1), (1, 1), 0
), buf1, buf2, buf5, buf10
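# For reference only: a minimal eager-mode sketch of what `call` computes
# above (reconstruction plus scalar loss), with the default costs that the
# kernels bake in. `_ref_forward` is a hypothetical helper added for
# illustration, not part of the compiled module.
def _ref_forward(inputs, codebook, l1_cost=1000.0, entropy_cost=5e-05):
    flat = inputs.reshape(-1, codebook.shape[1])
    flat = flat / flat.norm(2, dim=1, keepdim=True)
    norm_c = codebook / codebook.norm(2, dim=1, keepdim=True)
    dist = F.softmax(flat @ norm_c.t(), dim=1)
    recon = (dist @ norm_c).view(inputs.shape)
    entropy = torch.mean(torch.sum(-torch.matmul(F.log_softmax(dist,
        dim=1), dist.t()), dim=1))
    loss = l1_cost * dist.abs().mean() + entropy_cost * entropy
    return recon, loss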
class SequenceQuantizerSoftEMANew(nn.Module):
def __init__(self, codebook_size, d_model, l1_cost=1000, entropy_cost=
5e-05, num_samples=10, temp=1.0, epsilon=1e-05, padding_idx=None):
super(SequenceQuantizerSoftEMANew, self).__init__()
self.d_model = d_model
self.codebook_size = codebook_size
self.padding_idx = padding_idx
self.codebook = nn.Parameter(torch.FloatTensor(self.codebook_size,
self.d_model), requires_grad=True)
torch.nn.init.xavier_uniform_(self.codebook)
self.l1_cost = l1_cost
self.entropy_cost = entropy_cost
self.num_samples = num_samples
self.temp = temp
self._epsilon = epsilon
def entropy(self, tensor):
return torch.mean(torch.sum(-1 * torch.matmul(F.log_softmax(tensor,
dim=1), tensor.t()), dim=1))
def cluster(self, inputs):
input_shape = inputs.size()
        inputs.dim()  # no-op carried over from the source; the result is discarded
flat_input = inputs.reshape(-1, self.d_model)
flat_input = flat_input / flat_input.norm(2, dim=1)[:, None]
codebook = self.codebook / self.codebook.norm(2, dim=1)[:, None]
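        # NOTE: `self.output_nheads` is never defined anywhere on this
        # module, so calling cluster() raises AttributeError as written.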
distances = F.softmax(torch.matmul(flat_input, codebook.t()).
reshape(-1, self.output_nheads, codebook.shape[0]), dim=2)
reconstruction = torch.matmul(distances, codebook).view(input_shape)
encoding_indices = torch.argmax(distances, dim=1).reshape(-1, self.
output_nheads)
return reconstruction, encoding_indices, distances
    def set_codebook(self, new_codebook):
        # copy_ on a grad-requiring Parameter must run under no_grad
        with torch.no_grad():
            self.codebook.copy_(new_codebook)
def forward(self, input_0):
primals_2 = self.codebook
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| brcsomnath/SemAE | SequenceQuantizerSoftEMA | false | 6,385 | [
"MIT"
] | 1 | 8da5de73a5b334c6cb0b22eadaaacc35e98126ed | https://github.com/brcsomnath/SemAE/tree/8da5de73a5b334c6cb0b22eadaaacc35e98126ed | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
class Model(nn.Module):
def __init__(self, codebook_size, d_model, l1_cost=1000, entropy_cost=
5e-05, num_samples=10, temp=1.0, epsilon=1e-05, padding_idx=None):
super().__init__()
self.d_model = d_model
self.codebook_size = codebook_size
self.padding_idx = padding_idx
self.codebook = nn.Parameter(torch.FloatTensor(self.codebook_size,
self.d_model), requires_grad=True)
torch.nn.init.xavier_uniform_(self.codebook)
self.l1_cost = l1_cost
self.entropy_cost = entropy_cost
self.num_samples = num_samples
self.temp = temp
self._epsilon = epsilon
def entropy(self, tensor):
return torch.mean(torch.sum(-1 * torch.matmul(F.log_softmax(tensor,
dim=1), tensor.t()), dim=1))
def forward(self, inputs, l1_cost=None, entropy_cost=None, temp=None):
if l1_cost is None:
l1_cost = self.l1_cost
if entropy_cost is None:
entropy_cost = self.entropy_cost
if temp is None:
temp = self.temp
input_shape = inputs.size()
flat_input = inputs.reshape(-1, self.d_model)
norm_C = self.codebook / self.codebook.norm(2, dim=1)[:, None]
flat_input = flat_input / flat_input.norm(2, dim=1)[:, None]
distances = F.softmax(torch.matmul(flat_input, norm_C.t()), dim=1)
reconstruction = torch.matmul(distances, norm_C).view(input_shape)
l1_loss = nn.L1Loss()
loss = l1_cost * l1_loss(distances, torch.zeros_like(distances)
) + entropy_cost * self.entropy(distances)
return reconstruction, loss
def cluster(self, inputs):
input_shape = inputs.size()
        inputs.dim()  # no-op; the result is discarded
flat_input = inputs.reshape(-1, self.d_model)
flat_input = flat_input / flat_input.norm(2, dim=1)[:, None]
codebook = self.codebook / self.codebook.norm(2, dim=1)[:, None]
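        # NOTE: self.output_nheads is never set in __init__, so cluster()
        # raises AttributeError if invoked.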
distances = F.softmax(torch.matmul(flat_input, codebook.t()).
reshape(-1, self.output_nheads, codebook.shape[0]), dim=2)
reconstruction = torch.matmul(distances, codebook).view(input_shape)
encoding_indices = torch.argmax(distances, dim=1).reshape(-1, self.
output_nheads)
return reconstruction, encoding_indices, distances
    def set_codebook(self, new_codebook):
        # guard the in-place write: copy_ on a grad-requiring Parameter
        # outside no_grad raises a RuntimeError
        with torch.no_grad():
            self.codebook.copy_(new_codebook)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BertAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
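# Note on triton_poi_fused_0: the 1.0 multiplier above is
# 1/sqrt(attention_head_size) folded to a constant -- with hidden_size=4 and
# num_attention_heads=4 each head has size 1, so the scaling is a no-op
# fused into the bias add.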
# kernel path: runs/run_shard_4/inductor_cache/iz/ciztqj6kop3hxov46yrmzprkzfir3eljcic4mkqznz2j5cfeaudr.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float("-inf")
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = (tmp29 != 0)
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = (tmp33 != 0)
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = (tmp38 != 0)
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = (tmp43 != 0)
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp25, xmask)
tl.store(out_ptr2 + (x2), tmp45, xmask)
''', device_str='cuda')
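# In addition to the usual stable-softmax row max and exp-sum,
# triton_poi_fused_1 emits a per-row flag (out_ptr2) recording whether any
# score in the row is not -inf; rows that are fully masked to -inf would
# otherwise produce NaN and are zeroed by the next kernel instead.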
# kernel path: runs/run_shard_4/inductor_cache/x5/cx5uvbfethxuwwkwxf3xaualzhlcwqsz4jxqpbhintggaypzjwqf.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x4), xmask)
tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
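# triton_poi_fused_2 completes the softmax in place (exp(x - max) / sum) and
# uses the flag from the previous kernel to write exact zeros for rows whose
# scores were all -inf after masking.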
# kernel path: runs/run_shard_4/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hk/chkirlrxzb52fxbrq2rynamgt7aligt77yn6j6ihfk46whjvd374.py
# Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add_1 => add_1
# pow_1 => pow_1
# s => mean_1
# sub => sub_1
# u => mean
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_mean_pow_sub_5 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
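# triton_poi_fused_add_mean_pow_sub_5 computes the BertLayerNorm statistics:
# per-row mean (out_ptr0) and biased variance (out_ptr1) over the last
# dimension of size 4, with the residual add (hidden_states + input_tensor)
# fused into the same pass.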
# kernel path: runs/run_shard_4/inductor_cache/yq/cyqp2dtjka2kdwget3aoidhkk72sxga5xdfgoyaskxfpk55rnzdw.py
# Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# hidden_states_2 => add_3
# mul => mul
# sqrt => sqrt
# sub => sub_1
# u => mean
# x_3 => div_2
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_12), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_6 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
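# triton_poi_fused_add_div_mean_mul_sqrt_sub_6 applies the normalization
# itself: weight * (x - u) / sqrt(s + 1e-05) + bias, again recomputing the
# residual add inline rather than materializing it.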
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
triton_poi_fused_add_mean_pow_sub_5.run(buf13, primals_3, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_sqrt_sub_6.run(primals_11, buf13, primals_3, buf14, buf15, primals_12, buf16, 64, grid=grid(64), stream=stream0)
del buf14
del buf15
del primals_12
return (buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
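# Numerically this should match torch.nn.functional.layer_norm(x,
# x.shape[-1:], weight, bias, eps): both use the biased variance and place
# epsilon inside the square root.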
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
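# Shape walk-through for the test config below (hidden_size=4, 4 heads,
# head_size=1) on a (4, 4, 4) input: Q/K/V become (4, 4, 4, 1) after
# transpose_for_scores, attention_scores are (4, 4, 4, 4), and the context
# layer is reshaped back to (4, 4, 4).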
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float('-inf')
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp29 != 0
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 != 0
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38 != 0
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
tl.store(out_ptr2 + x2, tmp45, xmask)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_pow_sub_5[grid(16)](buf13, primals_3,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_6[grid(64)](primals_11,
buf13, primals_3, buf14, buf15, primals_12, buf16, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf14
del buf15
del primals_12
return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9
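# buf16 is the attention output returned to the caller; the remaining
# tensors (inputs, weights, attention probs, reshaped activations) are
# returned so autograd can reuse them in the backward pass of this AOT
# forward graph.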
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
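# A minimal shape walk-through with illustrative values (hidden_size=4,
# num_attention_heads=2 are assumptions): transpose_for_scores reshapes
# (batch, seq, hidden) into (batch, heads, seq, head_size), so the matmul in
# forward builds one attention score matrix per head.
def _transpose_for_scores_demo():
    import torch
    x = torch.randn(2, 5, 4)
    y = x.view(2, 5, 2, 2).permute(0, 2, 1, 3)
    assert y.shape == (2, 2, 5, 2)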
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttentionNew(nn.Module):
def __init__(self, config):
super(BertAttentionNew, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_0, input_1):
primals_1 = self.self.query.weight
primals_2 = self.self.query.bias
primals_4 = self.self.key.weight
primals_5 = self.self.key.bias
primals_6 = self.self.value.weight
primals_7 = self.self.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.weight
primals_12 = self.output.LayerNorm.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
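# A hedged usage sketch: call() above launches Triton kernels, so a CUDA
# device is required. The config object and its field values below are
# assumptions chosen to match the 4x4x4 inputs used throughout this trace.
def _bert_attention_new_demo():
    import torch
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.0, hidden_dropout_prob=0.0)
    m = BertAttentionNew(cfg).cuda()
    return m(torch.rand(4, 4, 4, device='cuda'),
        torch.rand(4, 4, 4, device='cuda'))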
| caldoe/BERT-NL2SPARQL | BertAttention | false | 6,386 | [
"MIT"
] | 1 | 2e09c1aeffc855bc7f1dc8c182e21153b2bc73a8 | https://github.com/caldoe/BERT-NL2SPARQL/tree/2e09c1aeffc855bc7f1dc8c182e21153b2bc73a8 | from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attenti
# ... truncated (>4000 chars) for memory efficiency |
CTLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ua/cua2bnjsaylwpuipejxq7alpkun7mpul6bg5aqbuwcaqjmveshag.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clamp]
# Source node to ATen node mapping:
# x => clamp_max, clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%select_11, 0.0001), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0.9999), kwargs = {})
triton_poi_fused_clamp_0 = async_compile.triton('triton_poi_fused_clamp_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp9 = tl.load(in_ptr0 + (x0), xmask)
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = tmp0 == tmp0
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = tmp2 == tmp2
tmp4 = tl.load(in_ptr0 + (x0), tmp1 & xmask, other=0.0)
tmp5 = tl.sigmoid(tmp4)
tmp6 = tl.where(tmp3, tmp5, tmp4)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp1, tmp6, tmp7)
tmp10 = tl.where(tmp1, tmp8, tmp9)
tmp11 = 0.0001
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = 0.9999
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zf/czfenthqf25mhze7vfx44hahxqz6yimpcjkzvso26yr7eo6u23sy.py
# Topologically Sorted Source Nodes: [sigmoid_, sigmoid__1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid_ => sigmoid
# sigmoid__1 => sigmoid_1
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%select_10,), kwargs = {})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%slice_tensor, %sigmoid, 0, 0), kwargs = {})
# %slice_scatter_default : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%arg0_1, %select_scatter_default, 0, 0, 9223372036854775807, 9), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%select_13,), kwargs = {})
# %select_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%slice_tensor_1, %sigmoid_1, 0, 0), kwargs = {})
# %slice_scatter_default_1 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %select_scatter_default_1, 0, 1, 9223372036854775807, 9), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64)
x0 = xindex % 64
x2 = xindex
tmp35 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = ((-1) + x1) % 9
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = triton_helpers.div_floor_integer((-1) + x1, 9)
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = tmp7 == tmp8
tmp10 = tmp1 == tmp4
tmp11 = tmp10 & tmp6
tmp12 = tmp8 == tmp8
tmp13 = tl.load(in_ptr0 + (x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl.where(tmp12, tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.load(in_ptr0 + (64 + x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = tl.sigmoid(tmp19)
tmp21 = tl.where(tmp9, tmp14, tmp13)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp11, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (64 + x0 + (576*(triton_helpers.div_floor_integer((-1) + x1, 9)))), tmp6 & xmask, other=0.0)
tmp25 = tl.where(tmp10, tmp23, tmp24)
tmp26 = tl.where(tmp9, tmp20, tmp25)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp6, tmp26, tmp27)
tmp29 = tmp0 == tmp4
tmp30 = tl.load(in_ptr0 + (x0), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp31 = tl.sigmoid(tmp30)
tmp32 = tl.where(tmp12, tmp31, tmp30)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp29, tmp32, tmp33)
tmp36 = tl.where(tmp29, tmp34, tmp35)
tmp37 = tl.where(tmp6, tmp28, tmp36)
tl.store(out_ptr0 + (x2), tmp37, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/f6/cf6ofsya263vgczihfxnr5nnn33lab2qgcajxxcoptlikatpuxuh.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clamp]
# Source node to ATen node mapping:
# x_1 => clamp_max_1, clamp_min_1
# Graph fragment:
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%select_14, 0.0001), kwargs = {})
# %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 0.9999), kwargs = {})
triton_poi_fused_clamp_2 = async_compile.triton('triton_poi_fused_clamp_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp1 = 0.0001
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lo/clopsrfoiffuf2cbnouk5emhc67l3zbukvdky4fzmubywxnsxsfm.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clamp]
# Source node to ATen node mapping:
# x_2 => clamp_max_2, clamp_min_2
# Graph fragment:
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%select_17, 0.0001), kwargs = {})
# %clamp_max_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 0.9999), kwargs = {})
triton_poi_fused_clamp_3 = async_compile.triton('triton_poi_fused_clamp_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp12 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int64)
tmp1 = tmp0 >= tmp0
tmp2 = tl.full([1], 0, tl.int64)
tmp3 = tmp2 == tmp2
tmp4 = tmp1 & tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tl.load(in_ptr0 + (128 + x0), tmp4 & xmask, other=0.0)
tmp8 = tl.sigmoid(tmp7)
tmp9 = tl.where(tmp6, tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = 0.0001
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = 0.9999
tmp17 = triton_helpers.minimum(tmp15, tmp16)
tl.store(out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bk/cbklf57zrqojchqffzuiit2vaxluatjrio3u4jvgsetjlwnzmrw6.py
# Topologically Sorted Source Nodes: [sigmoid__2, sigmoid__3], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid__2 => sigmoid_2
# sigmoid__3 => sigmoid_3
# Graph fragment:
# %sigmoid_2 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%select_16,), kwargs = {})
# %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%slice_tensor_2, %sigmoid_2, 0, 0), kwargs = {})
# %slice_scatter_default_2 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %select_scatter_default_2, 0, 2, 9223372036854775807, 9), kwargs = {})
# %sigmoid_3 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%select_19,), kwargs = {})
# %select_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%slice_tensor_3, %sigmoid_3, 0, 0), kwargs = {})
# %slice_scatter_default_3 : [num_users=6] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %select_scatter_default_3, 0, 3, 9223372036854775807, 9), kwargs = {})
# %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %slice_scatter_default_3), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64)
x0 = xindex % 64
x2 = xindex
tmp50 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = ((-3) + x1) % 9
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = triton_helpers.div_floor_integer((-3) + x1, 9)
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = tmp7 == tmp8
tmp10 = tl.full([1], 2, tl.int64)
tmp11 = tmp1 >= tmp10
tmp12 = tl.full([1], 1, tl.int64)
tmp13 = tmp12 == tmp4
tmp14 = tmp11 & tmp13
tmp15 = tmp14 & tmp6
tmp16 = tmp8 == tmp8
tmp17 = tl.load(in_ptr0 + (128 + x0), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.sigmoid(tmp17)
tmp19 = tl.where(tmp16, tmp18, tmp17)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (192 + x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.sigmoid(tmp23)
tmp25 = 3 + (9*(triton_helpers.div_floor_integer((-3) + x1, 9)))
tmp26 = tmp25 >= tmp10
tmp27 = tmp26 & tmp13
tmp28 = tmp27 & tmp6
tmp29 = tl.load(in_ptr0 + (128 + x0), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp30 = tl.sigmoid(tmp29)
tmp31 = tl.where(tmp9, tmp30, tmp29)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp28, tmp31, tmp32)
tmp34 = tl.load(in_ptr0 + (192 + x0 + (576*(triton_helpers.div_floor_integer((-3) + x1, 9)))), tmp6 & xmask, other=0.0)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp9, tmp24, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp6, tmp36, tmp37)
tmp39 = tmp0 >= tmp10
tmp40 = ((-2) + x1) % 9
tmp41 = tmp40 == tmp4
tmp42 = tmp39 & tmp41
tmp43 = triton_helpers.div_floor_integer((-2) + x1, 9)
tmp44 = tmp43 == tmp8
tmp45 = tl.load(in_ptr0 + (128 + x0), tmp42 & xmask, eviction_policy='evict_last', other=0.0)
tmp46 = tl.sigmoid(tmp45)
tmp47 = tl.where(tmp44, tmp46, tmp45)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp42, tmp47, tmp48)
tmp51 = tl.where(tmp42, tmp49, tmp50)
tmp52 = tl.where(tmp6, tmp38, tmp51)
tl.store(out_ptr0 + (x2), tmp52, xmask)
tl.store(out_ptr1 + (x2), tmp52, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xb/cxbti2irceniukbw7axsub6qgjxx4xqrlfzp6taijmbepcpd4vuh.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clamp]
# Source node to ATen node mapping:
# x_3 => clamp_max_3, clamp_min_3
# Graph fragment:
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%select_20, 0.0001), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 0.9999), kwargs = {})
triton_poi_fused_clamp_5 = async_compile.triton('triton_poi_fused_clamp_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp1 = 0.0001
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (10, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clamp]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_, sigmoid__1], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clamp]
triton_poi_fused_clamp_2.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clamp]
triton_poi_fused_clamp_3.run(buf1, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid__2, sigmoid__3], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_4.run(buf1, buf4, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clamp]
triton_poi_fused_clamp_5.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
return (buf0, reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 0), buf2, buf3, buf5, reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4, 1), 256), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4, 1), 256), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4, 1), 256), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4, 1), 256), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 64), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 128), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 192), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 256), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 320), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 384), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 448), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 512), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 576), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((10, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.onnx
def _neg_loss(preds, gt):
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
for pred in preds:
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2
) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _regr_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')
regr_loss = regr_loss / (num + 0.0001)
return regr_loss
def _sigmoid(x):
x = torch.clamp(x.sigmoid_(), min=0.0001, max=1 - 0.0001)
return x
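# An illustrative check (an addition, not repo code): the clamp in _sigmoid
# keeps values strictly inside (0, 1), so torch.log(pred) and
# torch.log(1 - pred) in _neg_loss stay finite even for saturated logits.
# Note that x.sigmoid_() is in-place and modifies the caller's tensor;
# clone() below avoids that side effect.
def _sigmoid_clamp_demo():
    import torch
    logits = torch.tensor([-100.0, 0.0, 100.0])
    p = _sigmoid(logits.clone())
    assert torch.isfinite(torch.log(p)).all()
    assert torch.isfinite(torch.log(1 - p)).all()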
class CTLoss(nn.Module):
def __init__(self, regr_weight=1, focal_loss=_neg_loss):
super(CTLoss, self).__init__()
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.regr_loss = _regr_loss
def forward(self, outs, targets):
stride = 9
t_heats = outs[0::stride]
l_heats = outs[1::stride]
b_heats = outs[2::stride]
r_heats = outs[3::stride]
ct_heats = outs[4::stride]
t_regrs = outs[5::stride]
l_regrs = outs[6::stride]
b_regrs = outs[7::stride]
r_regrs = outs[8::stride]
gt_t_heat = targets[0]
gt_l_heat = targets[1]
gt_b_heat = targets[2]
gt_r_heat = targets[3]
gt_ct_heat = targets[4]
gt_mask = targets[5]
gt_t_regr = targets[6]
gt_l_regr = targets[7]
gt_b_regr = targets[8]
gt_r_regr = targets[9]
focal_loss = 0
t_heats = [_sigmoid(t) for t in t_heats]
l_heats = [_sigmoid(l) for l in l_heats]
b_heats = [_sigmoid(b) for b in b_heats]
r_heats = [_sigmoid(r) for r in r_heats]
ct_heats = [_sigmoid(ct) for ct in ct_heats]
focal_loss += self.focal_loss(t_heats, gt_t_heat)
focal_loss += self.focal_loss(l_heats, gt_l_heat)
focal_loss += self.focal_loss(b_heats, gt_b_heat)
focal_loss += self.focal_loss(r_heats, gt_r_heat)
focal_loss += self.focal_loss(ct_heats, gt_ct_heat)
regr_loss = 0
for t_regr, l_regr, b_regr, r_regr in zip(t_regrs, l_regrs, b_regrs,
r_regrs):
regr_loss += self.regr_loss(t_regr, gt_t_regr, gt_mask)
regr_loss += self.regr_loss(l_regr, gt_l_regr, gt_mask)
regr_loss += self.regr_loss(b_regr, gt_b_regr, gt_mask)
regr_loss += self.regr_loss(r_regr, gt_r_regr, gt_mask)
regr_loss = self.regr_weight * regr_loss
loss = (focal_loss + regr_loss) / len(t_heats)
return loss.unsqueeze(0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([10, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
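# A hedged end-to-end sketch mirroring get_inputs() above: with only four
# tensors in `outs`, the strided slices outs[5::9] .. outs[8::9] are empty,
# so the regression branch contributes nothing and the loss reduces to the
# focal terms. The shapes are the synthetic benchmark shapes, not real data.
def _ctloss_demo():
    import torch
    loss_fn = CTLoss()
    return loss_fn(torch.rand(4, 4, 4, 4), torch.rand(10, 4, 4, 4))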
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp9 = tl.load(in_ptr0 + x0, xmask)
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = tmp0 == tmp0
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = tmp2 == tmp2
tmp4 = tl.load(in_ptr0 + x0, tmp1 & xmask, other=0.0)
tmp5 = tl.sigmoid(tmp4)
tmp6 = tl.where(tmp3, tmp5, tmp4)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp1, tmp6, tmp7)
tmp10 = tl.where(tmp1, tmp8, tmp9)
tmp11 = 0.0001
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = 0.9999
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp35 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = (-1 + x1) % 9
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = triton_helpers.div_floor_integer(-1 + x1, 9)
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = tmp7 == tmp8
tmp10 = tmp1 == tmp4
tmp11 = tmp10 & tmp6
tmp12 = tmp8 == tmp8
tmp13 = tl.load(in_ptr0 + x0, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl.where(tmp12, tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.load(in_ptr0 + (64 + x0), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = tl.sigmoid(tmp19)
tmp21 = tl.where(tmp9, tmp14, tmp13)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp11, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (64 + x0 + 576 * triton_helpers.
div_floor_integer(-1 + x1, 9)), tmp6 & xmask, other=0.0)
tmp25 = tl.where(tmp10, tmp23, tmp24)
tmp26 = tl.where(tmp9, tmp20, tmp25)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp6, tmp26, tmp27)
tmp29 = tmp0 == tmp4
tmp30 = tl.load(in_ptr0 + x0, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tl.sigmoid(tmp30)
tmp32 = tl.where(tmp12, tmp31, tmp30)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp29, tmp32, tmp33)
tmp36 = tl.where(tmp29, tmp34, tmp35)
tmp37 = tl.where(tmp6, tmp28, tmp36)
tl.store(out_ptr0 + x2, tmp37, xmask)
@triton.jit
def triton_poi_fused_clamp_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp1 = 0.0001
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_clamp_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp12 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int64)
tmp1 = tmp0 >= tmp0
tmp2 = tl.full([1], 0, tl.int64)
tmp3 = tmp2 == tmp2
tmp4 = tmp1 & tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tl.load(in_ptr0 + (128 + x0), tmp4 & xmask, other=0.0)
tmp8 = tl.sigmoid(tmp7)
tmp9 = tl.where(tmp6, tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = 0.0001
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = 0.9999
tmp17 = triton_helpers.minimum(tmp15, tmp16)
tl.store(out_ptr0 + x0, tmp17, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp50 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = (-3 + x1) % 9
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 == tmp4
tmp6 = tmp2 & tmp5
tmp7 = triton_helpers.div_floor_integer(-3 + x1, 9)
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = tmp7 == tmp8
tmp10 = tl.full([1], 2, tl.int64)
tmp11 = tmp1 >= tmp10
tmp12 = tl.full([1], 1, tl.int64)
tmp13 = tmp12 == tmp4
tmp14 = tmp11 & tmp13
tmp15 = tmp14 & tmp6
tmp16 = tmp8 == tmp8
tmp17 = tl.load(in_ptr0 + (128 + x0), tmp15 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.sigmoid(tmp17)
tmp19 = tl.where(tmp16, tmp18, tmp17)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (192 + x0), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.sigmoid(tmp23)
tmp25 = 3 + 9 * triton_helpers.div_floor_integer(-3 + x1, 9)
tmp26 = tmp25 >= tmp10
tmp27 = tmp26 & tmp13
tmp28 = tmp27 & tmp6
tmp29 = tl.load(in_ptr0 + (128 + x0), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tl.sigmoid(tmp29)
tmp31 = tl.where(tmp9, tmp30, tmp29)
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp28, tmp31, tmp32)
tmp34 = tl.load(in_ptr0 + (192 + x0 + 576 * triton_helpers.
div_floor_integer(-3 + x1, 9)), tmp6 & xmask, other=0.0)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp9, tmp24, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp6, tmp36, tmp37)
tmp39 = tmp0 >= tmp10
tmp40 = (-2 + x1) % 9
tmp41 = tmp40 == tmp4
tmp42 = tmp39 & tmp41
tmp43 = triton_helpers.div_floor_integer(-2 + x1, 9)
tmp44 = tmp43 == tmp8
tmp45 = tl.load(in_ptr0 + (128 + x0), tmp42 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp46 = tl.sigmoid(tmp45)
tmp47 = tl.where(tmp44, tmp46, tmp45)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp42, tmp47, tmp48)
tmp51 = tl.where(tmp42, tmp49, tmp50)
tmp52 = tl.where(tmp6, tmp38, tmp51)
tl.store(out_ptr0 + x2, tmp52, xmask)
tl.store(out_ptr1 + x2, tmp52, xmask)
@triton.jit
def triton_poi_fused_clamp_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp1 = 0.0001
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
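# A hedged eager reference (an addition): with xnumel = 64, the always-true
# select conditions in triton_poi_fused_clamp_0 collapse, and the kernel
# effectively writes clamp(sigmoid(arg0[0]), 1e-4, 1 - 1e-4) for the first
# heat-map slice.
def _clamp0_reference(arg0):
    import torch
    return torch.clamp(torch.sigmoid(arg0[0]), 0.0001, 0.9999)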
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (10, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_sigmoid_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clamp_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clamp_3[grid(64)](buf1, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_sigmoid_4[grid(256)](buf1, buf4, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clamp_5[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf0, reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 0
), buf2, buf3, buf5, reinterpret_tensor(buf4, (0, 4, 4, 4), (576,
16, 4, 1), 256), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4,
1), 256), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4, 1), 256
), reinterpret_tensor(buf4, (0, 4, 4, 4), (576, 16, 4, 1), 256
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 64
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 128
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 192
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 256
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 320
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 384
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 448
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 512
), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 4, 1), 576)
def _neg_loss(preds, gt):
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
for pred in preds:
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2
) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _regr_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')
regr_loss = regr_loss / (num + 0.0001)
return regr_loss
def _sigmoid(x):
x = torch.clamp(x.sigmoid_(), min=0.0001, max=1 - 0.0001)
return x
class CTLossNew(nn.Module):
def __init__(self, regr_weight=1, focal_loss=_neg_loss):
super(CTLossNew, self).__init__()
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.regr_loss = _regr_loss
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
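# A hedged usage sketch (CUDA only, since call() dispatches the Triton
# kernels). Note that output[0] returned here is the first tensor of the
# traced graph (the clamped sigmoid of outs[0]), not the scalar loss that
# the eager CTLoss computes.
def _ctloss_new_demo():
    import torch
    m = CTLossNew()
    return m(torch.rand(4, 4, 4, 4, device='cuda'),
        torch.rand(10, 4, 4, 4, device='cuda'))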
| c464851257/extremenet-lite | CTLoss | false | 6,387 | [
"BSD-3-Clause"
] | 1 | 331446f2c5d9524d46d2b33823eff02416f43052 | https://github.com/c464851257/extremenet-lite/tree/331446f2c5d9524d46d2b33823eff02416f43052 | import torch
import torch.nn as nn
import torch.onnx
def _neg_loss(preds, gt):
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
for pred in preds:
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2
) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _regr_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')
regr_loss = regr_loss / (num + 0.0001)
return regr_loss
def _sigmoid(x):
x = torch.clamp(x.sigmoid_(), min=0.0001, max=1 - 0.0001)
return x
class Model(nn.Module):
def __init__(self, regr_weight=1, focal_loss=_neg_loss):
super().__init__()
self.regr_weight = regr_weight
self.focal_loss = focal_loss
self.regr_loss = _regr_loss
def forward(self, outs, targets):
stride = 9
t_heats = outs[0::stride]
l_heats = outs[1::stride]
b_heats = outs[2::stride]
r_heats = outs[3::stride]
ct_heats = outs[4::stride]
t_regrs = outs[5::stride]
l_regrs = outs[6::stride]
b_regrs = outs[7::stride]
r_regrs = outs[8::stride]
gt_t_heat = targets[0]
gt_l_heat = targets[1]
gt_b_heat = targets[2]
gt_r_heat = targets[3]
gt_ct_heat = targets[4]
gt_mask = targets[5]
gt_t_regr = targets[6]
gt_l_regr = targets[7]
gt_b_regr = targets[8]
gt_r_regr = targets[9]
focal_loss = 0
t_heats = [_sigmoid(t) for t in t_heats]
l_heats = [_sigmoid(l) for l in l_heats]
b_heats = [_sigmoid(b) for b in b_heats]
r_heats = [_sigmoid(r) for r in r_heats]
ct_heats = [_sigmoid(ct) for ct in ct_heats]
focal_loss += self.focal_loss(t_heats, gt_t_heat)
focal_loss += self.focal_loss(l_heats, gt_l_heat)
focal_loss += self.focal_loss(b_heats, gt_b_heat)
focal_loss += self.focal_loss(r_heats, gt_r_heat)
focal_loss += self.focal_loss(ct_heats, gt_ct_heat)
regr_loss = 0
for t_regr, l_regr, b_regr, r_regr in zip(t_regrs, l_regrs, b_regrs,
r_regrs):
regr_loss += self.regr_loss(t_regr, gt_t_regr, gt_mask)
regr_loss += self.regr_loss(l_regr, gt_l_regr, gt_mask)
regr_loss += self.regr_loss(b_regr, gt_b_regr, gt_mask)
regr_loss += self.regr_loss(r_regr, gt_r_regr, gt_mask)
regr_loss = self.regr_weight * regr_loss
loss = (focal_loss + regr_loss) / len(t_heats)
return loss.unsqueeze(0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([10, 4, 4, 4])]
def get_init_inputs():
return []
|
upsampleBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cg/ccgupjidh7vzwqy3i33hof3c5b5ytgxw3bw52fvz6g4mshaw5o5v.py
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*(x1 // 2)) + (16*(x0 % 2)) + (32*(x1 % 2)) + (64*x2) + (x0 // 2)), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 1, 8, 8), (64, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def swish(x):
return x * torch.sigmoid(x)
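# A side note, not repo code: swish(x) = x * sigmoid(x) is the same function
# that torch.nn.SiLU implements, so the two agree numerically.
def _swish_matches_silu():
    import torch
    x = torch.randn(8)
    assert torch.allclose(swish(x), torch.nn.SiLU()(x))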
class upsampleBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsampleBlock, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1
)
self.shuffler = nn.PixelShuffle(2)
def forward(self, x):
return swish(self.shuffler(self.conv(x)))
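# A shape walk-through matching get_inputs() below: the 3x3 conv preserves
# spatial size, and PixelShuffle(2) trades channels for resolution, so with
# out_channels=4 the block maps (N, 4, H, W) -> (N, 1, 2H, 2W).
def _upsample_shape_demo():
    import torch
    block = upsampleBlock(4, 4)
    assert block(torch.rand(4, 4, 4, 4)).shape == (4, 1, 8, 8)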
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * (x0 % 2) + 32 * (x1 % 2) +
64 * x2 + x0 // 2), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
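# A hedged eager equivalent (an addition): the gather index above,
# 4*(x1//2) + 16*(x0%2) + 32*(x1%2) + 64*x2 + x0//2, is exactly the
# PixelShuffle(2) permutation of a (4, 4, 4, 4) tensor, fused with swish.
def _shuffle_swish_reference(buf1):
    import torch
    y = torch.nn.PixelShuffle(2)(buf1)
    return y * torch.sigmoid(y)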
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 1, 8, 8), (64, 64, 8, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
def swish(x):
return x * torch.sigmoid(x)
class upsampleBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsampleBlockNew, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1)
self.shuffler = nn.PixelShuffle(2)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| carl-zjr/super-resolution-reconstruction | upsampleBlock | false | 6,388 | [
"Apache-2.0"
] | 1 | 37b5b42ea6e8864c12a93a7e90d3bf0920f502d4 | https://github.com/carl-zjr/super-resolution-reconstruction/tree/37b5b42ea6e8864c12a93a7e90d3bf0920f502d4 | import torch
import torch.nn as nn
def swish(x):
return x * torch.sigmoid(x)
class Model(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1)
self.shuffler = nn.PixelShuffle(2)
def forward(self, x):
return swish(self.shuffler(self.conv(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SeparableConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf2, primals_1, primals_2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
class SeparableConvBlock(nn.Module):
def __init__(self, inplanes, planes):
super(SeparableConvBlock, self).__init__()
self.depthwise_conv = nn.Conv2d(inplanes, inplanes, kernel_size=3,
stride=1, padding=1, groups=inplanes, bias=False)
self.pointwise_conv = nn.Conv2d(inplanes, planes, kernel_size=1,
stride=1, padding=0, bias=True)
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
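# Parameter-count sketch (an illustrative addition, not part of the original
# repo): with inplanes = planes = 4 the depthwise 3x3 conv holds 4*1*3*3 = 36
# weights and the pointwise 1x1 conv holds 4*4 + 4 = 20, i.e. 56 parameters
# total, versus 4*4*3*3 + 4 = 148 for an equivalent dense 3x3 convolution.
if __name__ == "__main__":
    block = SeparableConvBlock(4, 4)
    assert sum(p.numel() for p in block.parameters()) == 56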
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
return buf2, primals_1, primals_2, primals_3, buf0
class SeparableConvBlockNew(nn.Module):
def __init__(self, inplanes, planes):
super(SeparableConvBlockNew, self).__init__()
self.depthwise_conv = nn.Conv2d(inplanes, inplanes, kernel_size=3,
stride=1, padding=1, groups=inplanes, bias=False)
self.pointwise_conv = nn.Conv2d(inplanes, planes, kernel_size=1,
stride=1, padding=0, bias=True)
def forward(self, input_0):
primals_1 = self.depthwise_conv.weight
primals_3 = self.pointwise_conv.weight
primals_4 = self.pointwise_conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| carol007/pytorch-ImageNet-CIFAR-COCO-VOC-training | SeparableConvBlock | false | 6,389 | [
"MIT"
] | 1 | e8b37046e6fbe914f6a68bbde1fe419c46373c1d | https://github.com/carol007/pytorch-ImageNet-CIFAR-COCO-VOC-training/tree/e8b37046e6fbe914f6a68bbde1fe419c46373c1d | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
class Model(nn.Module):
def __init__(self, inplanes, planes):
super().__init__()
self.depthwise_conv = nn.Conv2d(inplanes, inplanes, kernel_size=3,
stride=1, padding=1, groups=inplanes, bias=False)
self.pointwise_conv = nn.Conv2d(inplanes, planes, kernel_size=1,
stride=1, padding=0, bias=True)
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GlobalChannelLayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tz/ctz4vznlbejufkqdd5ry6w7p7jmz72rshj32pjvkkbyr64dndrxr.py
# Topologically Sorted Source Nodes: [mean, sub, pow_1, var, mul, add, sqrt, truediv, x], Original ATen: [aten.mean, aten.sub, aten.pow, aten.mul, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mul => mul
# pow_1 => pow_1
# sqrt => sqrt
# sub => sub
# truediv => div
# var => mean_1
# x => add_1
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1, 2], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1, 2], True), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sqrt), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {})
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp17 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp12 / tmp5
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp18 = tmp17 * tmp7
tmp19 = tmp18 / tmp16
tmp21 = tmp19 + tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp16, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp21, xmask)
''', device_str='cuda')
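# Added commentary: this fused kernel performs both reductions (mean and
# variance over the joint C x T block of each sample) and the affine
# normalization in one pass, and also stores the per-sample mean and
# sqrt(var + eps) in buf1/buf3, which call() returns so the backward pass
# can reuse them.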
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub, pow_1, var, mul, add, sqrt, truediv, x], Original ATen: [aten.mean, aten.sub, aten.pow, aten.mul, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0.run(buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 16, grid=grid(4), stream=stream0)
del primals_2
del primals_3
return (buf4, primals_1, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GlobalChannelLayerNorm(nn.Module):
"""
Global channel layer normalization
"""
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalChannelLayerNorm, self).__init__()
self.eps = eps
self.normalized_dim = dim
self.elementwise_affine = elementwise_affine
if elementwise_affine:
self.beta = nn.Parameter(torch.zeros(dim, 1))
self.gamma = nn.Parameter(torch.ones(dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
"""
x: N x C x T
"""
if x.dim() != 3:
            raise RuntimeError('{} accepts a 3D tensor as input'.format(
                type(self).__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
if self.elementwise_affine:
            x = self.gamma * (x - mean) / torch.sqrt(var + self.eps) + self.beta
else:
x = (x - mean) / torch.sqrt(var + self.eps)
return x
def extra_repr(self):
return (
'{normalized_dim}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
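# Sanity-check sketch (an illustrative addition, not part of the original
# repo): at initialization gamma = 1 and beta = 0, so the output of each
# sample should have (near-)zero mean over its joint (C, T) dimensions.
if __name__ == "__main__":
    gln = GlobalChannelLayerNorm(4)
    y = gln(torch.rand(4, 4, 4))
    assert torch.allclose(y.mean(dim=(1, 2)), torch.zeros(4), atol=1e-05)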
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp17 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp12 / tmp5
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp18 = tmp17 * tmp7
tmp19 = tmp18 / tmp16
tmp21 = tmp19 + tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp16, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_pow_sqrt_sub_0[grid(4)](buf1,
buf3, primals_1, primals_2, primals_3, buf4, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf4, primals_1, buf1, buf3
class GlobalChannelLayerNormNew(nn.Module):
"""
Global channel layer normalization
"""
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalChannelLayerNormNew, self).__init__()
self.eps = eps
self.normalized_dim = dim
self.elementwise_affine = elementwise_affine
if elementwise_affine:
self.beta = nn.Parameter(torch.zeros(dim, 1))
self.gamma = nn.Parameter(torch.ones(dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def extra_repr(self):
return (
'{normalized_dim}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def forward(self, input_0):
primals_2 = self.beta
primals_3 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| c-ma13/sepTFNet | GlobalChannelLayerNorm | false | 6,390 | [
"MIT"
] | 1 | a06c89c080f9449ac2e5090f80d9645deea7f23a | https://github.com/c-ma13/sepTFNet/tree/a06c89c080f9449ac2e5090f80d9645deea7f23a | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Global channel layer normalization
"""
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super().__init__()
self.eps = eps
self.normalized_dim = dim
self.elementwise_affine = elementwise_affine
if elementwise_affine:
self.beta = nn.Parameter(torch.zeros(dim, 1))
self.gamma = nn.Parameter(torch.ones(dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
"""
x: N x C x T
"""
if x.dim() != 3:
            raise RuntimeError('{} accepts a 3D tensor as input'.format(
                type(self).__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
if self.elementwise_affine:
            x = self.gamma * (x - mean) / torch.sqrt(var + self.eps) + self.beta
else:
x = (x - mean) / torch.sqrt(var + self.eps)
return x
def extra_repr(self):
return (
'{normalized_dim}, eps={eps}, elementwise_affine={elementwise_affine}'
.format(**self.__dict__))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
HighwayNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mx/cmxbsuduhrb3re3kyyjzil6fiub7wk6y3jw7tjv4rnhxateuwfne.py
# Topologically Sorted Source Nodes: [g, relu, mul, sub, mul_1, y], Original ATen: [aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# g => sigmoid
# mul => mul
# mul_1 => mul_1
# relu => relu
# sub => sub
# y => add
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %relu), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_relu_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp8 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
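# Added commentary: the entire highway update g * relu(x1) + (1 - g) * x is
# fused into this single pointwise kernel; only the two addmm projections
# for x1 and x2 remain as separate extern calls.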
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [g, relu, mul, sub, mul_1, y], Original ATen: [aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0.run(buf1, buf0, primals_3, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class HighwayNetwork(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.0)
def forward(self, x):
x1 = self.W1(x)
x2 = self.W2(x)
g = torch.sigmoid(x2)
y = g * F.relu(x1) + (1.0 - g) * x
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
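# Gating sketch (an illustrative addition, not part of the original repo):
# g = sigmoid(W2 x) interpolates between the transformed path relu(W1 x) and
# the identity path x, so driving the gate bias strongly negative makes the
# block collapse to (approximately) the identity function.
if __name__ == "__main__":
    hw = HighwayNetwork(4)
    hw.W2.bias.data.fill_(-20.0)
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(hw(x), x, atol=0.001)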
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf1, buf0,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
class HighwayNetworkNew(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.0)
def forward(self, input_0):
primals_1 = self.W1.weight
primals_2 = self.W1.bias
primals_4 = self.W2.weight
primals_5 = self.W2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| cassiavb/Tacotron | HighwayNetwork | false | 6,391 | [
"MIT"
] | 1 | 946408f8cd7b5fe9c53931c631267ba2a723910d | https://github.com/cassiavb/Tacotron/tree/946408f8cd7b5fe9c53931c631267ba2a723910d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.0)
def forward(self, x):
x1 = self.W1(x)
x2 = self.W2(x)
g = torch.sigmoid(x2)
y = g * F.relu(x1) + (1.0 - g) * x
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
LevelVariabilityLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ok/cok532ddqmz6anqcngyyyvukjpngrtca54uc3pkaq22qiywlnfqt.py
# Topologically Sorted Source Nodes: [diff, level_var_loss, mean, level_var_loss_1], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul]
# Source node to ATen node mapping:
# diff => sub_1
# level_var_loss => pow_1
# level_var_loss_1 => mul
# mean => mean
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_6, %slice_8), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 4), kwargs = {})
triton_per_fused_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = (rindex // 32)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp4 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
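# Added commentary: the three loads at offsets 0, 16 and 32 correspond to the
# slices levels[:, :-2], levels[:, 1:-1] and levels[:, 2:] of the (4, 4, 4, 4)
# input flattened over its trailing dims, so the kernel forms the second
# difference of the log-levels, squares it, and reduces to the scalar loss in
# a single pass.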
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [diff, level_var_loss, mean, level_var_loss_1], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0.run(buf1, arg0_1, 1, 128, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LevelVariabilityLoss(nn.Module):
"""Computes the variability penalty for the level.
levels: levels obtained from exponential smoothing component of ESRNN.
tensor with shape (batch, n_time).
level_variability_penalty: float.
return: level_var_loss
"""
def __init__(self, level_variability_penalty):
super(LevelVariabilityLoss, self).__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, levels):
assert levels.shape[1] > 2
level_prev = torch.log(levels[:, :-1])
level_next = torch.log(levels[:, 1:])
log_diff_of_levels = torch.sub(level_prev, level_next)
log_diff_prev = log_diff_of_levels[:, :-1]
log_diff_next = log_diff_of_levels[:, 1:]
diff = torch.sub(log_diff_prev, log_diff_next)
level_var_loss = diff ** 2
level_var_loss = level_var_loss.mean() * self.level_variability_penalty
return level_var_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'level_variability_penalty': 4}]
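# Hand-check sketch (an illustrative addition, not part of the original
# repo): for levels decaying at a constant rate every log-difference is
# identical, the second differences vanish, and the penalty is exactly zero.
if __name__ == "__main__":
    loss_fn = LevelVariabilityLoss(4)
    levels = torch.tensor([[8.0, 4.0, 2.0, 1.0]])
    assert torch.allclose(loss_fn(levels), torch.tensor(0.0), atol=1e-06)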
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 32
r1 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp1 = tl_math.log(tmp0)
tmp3 = tl_math.log(tmp2)
tmp4 = tmp1 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp4 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 128.0
tmp14 = tmp12 / tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, 1, 128,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LevelVariabilityLossNew(nn.Module):
"""Computes the variability penalty for the level.
levels: levels obtained from exponential smoothing component of ESRNN.
tensor with shape (batch, n_time).
level_variability_penalty: float.
return: level_var_loss
"""
def __init__(self, level_variability_penalty):
super(LevelVariabilityLossNew, self).__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| cchallu/esrnn | LevelVariabilityLoss | false | 6,392 | [
"MIT"
] | 1 | 543ca365c70be2775a4b5863820b246071ccde3c | https://github.com/cchallu/esrnn/tree/543ca365c70be2775a4b5863820b246071ccde3c | import torch
import torch.nn as nn
class Model(nn.Module):
"""Computes the variability penalty for the level.
levels: levels obtained from exponential smoothing component of ESRNN.
tensor with shape (batch, n_time).
level_variability_penalty: float.
return: level_var_loss
"""
def __init__(self, level_variability_penalty):
super().__init__()
self.level_variability_penalty = level_variability_penalty
def forward(self, levels):
assert levels.shape[1] > 2
level_prev = torch.log(levels[:, :-1])
level_next = torch.log(levels[:, 1:])
log_diff_of_levels = torch.sub(level_prev, level_next)
log_diff_prev = log_diff_of_levels[:, :-1]
log_diff_next = log_diff_of_levels[:, 1:]
diff = torch.sub(log_diff_prev, log_diff_next)
level_var_loss = diff ** 2
level_var_loss = level_var_loss.mean() * self.level_variability_penalty
return level_var_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MultiHeadedAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x7/cx727joiftultx46mv2v4nj3wq4ckralwwhfk6nlqptb654rmnit.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sj/csjx772qtehbicvkv5vtkhqu3yqj65tbhzk7oih4tz37sax3j6wq.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 16, 16], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_per_fused_1 = async_compile.triton('triton_per_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_1(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float("-inf")
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = (tmp14 != 0)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp23, xmask)
''', device_str='cuda')
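# Added commentary: triton_per_fused_1 is the numerically stable masked
# softmax: it subtracts the row max before exponentiating, and the
# any/logical_not chain detects rows that are entirely -inf (fully masked)
# and writes zeros there instead of the NaNs a naive exp-sum-div would
# produce from 0/0.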
# kernel path: runs/run_shard_4/inductor_cache/fs/cfsktp6ekva62tzoyn5kreys7zax64otksvrzq3eopzdnvtsux4l.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2s/c2s3zo6qtbodb6bdwv46ozxj4nxxymp76igm7emvdafvrj3673sn.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_3, buf3, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_per_fused_1.run(buf5, buf9, 256, 16, grid=grid(256), stream=stream0)
del buf5
buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf2, primals_8, buf10, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf11, buf12, 64, 4, grid=grid(64, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_11
return (reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf9, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import numpy as np
from typing import Optional
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
super(MultiHeadedAttention, self).__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Compute 'Scaled Dot Product Attention'
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor mask: (batch, time1, time2)
        :return torch.Tensor: attended and transformed `value` (batch, time1, d_model)
weighted by the query dot key attention (batch, head, time1, time2)
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill_(mask, -np.inf)
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, v)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
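# Usage sketch (shapes are illustrative, not taken from the repo): with
# n_head=4 and n_feat=8 this performs self-attention over (batch, time, feat):
#
#   mha = MultiHeadedAttention(n_head=4, n_feat=8, dropout_rate=0.1)
#   x = torch.rand(2, 10, 8)
#   out = mha(x, x, x)  # -> (2, 10, 8)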
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
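# triton_poi_fused_0 adds the projection bias and stores q/k in head-major
# layout; the multiply by tmp3 = 1.0 is consistent with the folded 1/sqrt(d_k)
# attention scale, which is exactly 1 for the traced shapes
# (d_k = n_feat // n_head = 4 // 4 = 1).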
@triton.jit
def triton_per_fused_1(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float('-inf')
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 != 0
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp23, xmask)
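# triton_per_fused_1 is a numerically stable softmax over the time2 axis:
# softmax(x)_j = exp(x_j - max(x)) / sum_k exp(x_k - max(x)). The extra
# any(x != -inf) reduction (tmp12..tmp19) appears to guard fully masked rows,
# writing 0.0 rather than 0/0 when every score is -inf.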
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
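# triton_poi_fused_2 is the same bias-add-and-repack as triton_poi_fused_0 but
# without the attention scale: it lays out the value projection as
# (batch, head, time2, d_k) for the second bmm.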
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
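# triton_poi_fused_clone_3 materializes x.transpose(1, 2).contiguous() from
# the source module, turning the (batch, head, time1, d_k) attention output
# into a contiguous (batch, time1, head * d_k) buffer for linear_out.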
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](buf0, primals_3, buf3, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 16)](buf1, primals_5, buf4, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused_1[grid(256)](buf5, buf9, 256, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf5
buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_2[grid(16, 16)](buf2, primals_8, buf10, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0),
out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_11
return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0
), reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_10
class MultiHeadedAttentionNew(nn.Module):
"""Multi-Head Attention layer
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
super(MultiHeadedAttentionNew, self).__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_10 = self.linear_out.weight
primals_11 = self.linear_out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| carankt/FastSpeech2-1 | MultiHeadedAttention | false | 6,393 | [
"Apache-2.0"
] | 1 | 42c06e4fbdf741a0719154d1cb4617b7d3f15a5c | https://github.com/carankt/FastSpeech2-1/tree/42c06e4fbdf741a0719154d1cb4617b7d3f15a5c | import math
import torch
import numpy as np
from typing import Optional
from torch import nn
class Model(nn.Module):
"""Multi-Head Attention layer
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'):
super().__init__()
assert n_feat % n_head == 0
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', mask: 'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Compute 'Scaled Dot Product Attention'
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor mask: (batch, time1, time2)
        :return torch.Tensor: attended and transformed `value` (batch, time1, d_model)
weighted by the query dot key attention (batch, head, time1, time2)
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
if mask is not None:
mask = mask.unsqueeze(1).eq(0)
scores = scores.masked_fill_(mask, -np.inf)
attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
else:
attn = torch.softmax(scores, dim=-1)
p_attn = self.dropout(attn)
x = torch.matmul(p_attn, v)
x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
return self.linear_out(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5]
|
MaskedInstanceNorm1d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hv/chvlig3vkflhzd7ivdbf6ojzalx44fop7l6z3mhozziz64jmw5ck.py
# Topologically Sorted Source Nodes: [mul, sum_2, cnt, cnt_for_mu, mu, sub, sigma, mul_1, sum_3, sub_1, cnt_fot_sigma, sigma_1, add, sigma_2], Original ATen: [aten.mul, aten.sum, aten.clamp, aten.div, aten.sub, aten.pow, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# cnt => sum_1
# cnt_for_mu => clamp_max, clamp_min
# cnt_fot_sigma => clamp_max_1, clamp_min_1
# mu => div
# mul => mul
# mul_1 => mul_1
# sigma => pow_1
# sigma_1 => div_1
# sigma_2 => sqrt
# sub => sub
# sub_1 => sub_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %unsqueeze), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1], True), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%unsqueeze, [-1], True), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_1, 1.0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 100000.0), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %clamp_max), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %div), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %unsqueeze), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 1.0), kwargs = {})
# %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 100000.0), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %clamp_max_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x0 = xindex % 16
x2 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((4*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp1 + tmp4
tmp16 = tmp15 + tmp8
tmp17 = tmp16 + tmp12
tmp18 = 1.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = 100000.0
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tmp14 / tmp21
tmp23 = tmp0 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp24 * tmp1
tmp26 = tmp3 - tmp22
tmp27 = tmp26 * tmp26
tmp28 = tmp27 * tmp4
tmp29 = tmp25 + tmp28
tmp30 = tmp7 - tmp22
tmp31 = tmp30 * tmp30
tmp32 = tmp31 * tmp8
tmp33 = tmp29 + tmp32
tmp34 = tmp11 - tmp22
tmp35 = tmp34 * tmp34
tmp36 = tmp35 * tmp12
tmp37 = tmp33 + tmp36
tmp38 = tmp17 - tmp18
tmp39 = triton_helpers.maximum(tmp38, tmp18)
tmp40 = triton_helpers.minimum(tmp39, tmp20)
tmp41 = tmp37 / tmp40
tmp42 = 1e-08
tmp43 = tmp41 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tl.store(out_ptr0 + (x4), tmp22, xmask)
tl.store(in_out_ptr0 + (x4), tmp44, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/x7/cx7htd3xitvxaxetefd5p23n7ecwnggzfyxx3t47rzzxl37p6cg2.py
# Topologically Sorted Source Nodes: [cnt, sub_2, sub_1, cnt_fot_sigma, sigma_1, add, sigma_2, y], Original ATen: [aten.sum, aten.sub, aten.clamp, aten.div, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# cnt => sum_1
# cnt_fot_sigma => clamp_max_1, clamp_min_1
# sigma_1 => div_1
# sigma_2 => sqrt
# sub_1 => sub_1
# sub_2 => sub_2
# y => div_2
# Graph fragment:
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%unsqueeze, [-1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %div), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 1.0), kwargs = {})
# %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 100000.0), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %clamp_max_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt), kwargs = {})
triton_poi_fused_add_clamp_div_sqrt_sub_sum_1 = async_compile.triton('triton_poi_fused_add_clamp_div_sqrt_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sqrt_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sqrt_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x4 = (xindex // 4)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + (x5), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, sum_2, cnt, cnt_for_mu, mu, sub, sigma, mul_1, sum_3, sub_1, cnt_fot_sigma, sigma_1, add, sigma_2], Original ATen: [aten.mul, aten.sum, aten.clamp, aten.div, aten.sub, aten.pow, aten.add, aten.sqrt]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0.run(buf2, arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cnt, sub_2, sub_1, cnt_fot_sigma, sigma_1, add, sigma_2, y], Original ATen: [aten.sum, aten.sub, aten.clamp, aten.div, aten.add, aten.sqrt]
triton_poi_fused_add_clamp_div_sqrt_sub_sum_1.run(arg1_1, buf0, buf2, buf3, 1024, grid=grid(1024), stream=stream0)
del arg1_1
del buf0
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.cuda
from torch import nn
import torch.utils.data
import torch.optim
class MaskedInstanceNorm1d(nn.Module):
"""Instance norm + masking."""
MAX_CNT = 100000.0
def __init__(self, d_channel: 'int', unbiased: 'bool'=True, affine:
'bool'=False):
super().__init__()
self.d_channel = d_channel
self.unbiased = unbiased
self.affine = affine
if self.affine:
gamma = torch.ones(d_channel, dtype=torch.float)
beta = torch.zeros_like(gamma)
self.register_parameter('gamma', nn.Parameter(gamma))
self.register_parameter('beta', nn.Parameter(beta))
def forward(self, x: 'torch.Tensor', x_mask: 'torch.Tensor'
) ->torch.Tensor:
"""`x`: [B,C,T], `x_mask`: [B,T] => [B,C,T]."""
x_mask = x_mask.unsqueeze(1).type_as(x)
cnt = x_mask.sum(dim=-1, keepdim=True)
cnt_for_mu = cnt.clamp(1.0, self.MAX_CNT)
mu = (x * x_mask).sum(dim=-1, keepdim=True) / cnt_for_mu
sigma = (x - mu) ** 2
cnt_fot_sigma = (cnt - int(self.unbiased)).clamp(1.0, self.MAX_CNT)
sigma = (sigma * x_mask).sum(dim=-1, keepdim=True) / cnt_fot_sigma
sigma = (sigma + 1e-08).sqrt()
y = (x - mu) / sigma
if self.affine:
gamma = self.gamma.unsqueeze(0).unsqueeze(-1)
beta = self.beta.unsqueeze(0).unsqueeze(-1)
y = y * gamma + beta
return y
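# Sanity-check sketch (shapes are illustrative, not taken from the repo): with
# an all-ones mask the masked statistics reduce to ordinary instance norm over
# the full time axis:
#
#   norm = MaskedInstanceNorm1d(d_channel=8)
#   x = torch.rand(2, 8, 16)                  # [B, C, T]
#   y = norm(x, torch.ones(2, 16))            # mask keeps every frame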
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_channel': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.cuda
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x0 = xindex % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 * x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0 + 64 * x2), xmask, eviction_policy
='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp1 + tmp4
tmp16 = tmp15 + tmp8
tmp17 = tmp16 + tmp12
tmp18 = 1.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = 100000.0
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tmp14 / tmp21
tmp23 = tmp0 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tmp24 * tmp1
tmp26 = tmp3 - tmp22
tmp27 = tmp26 * tmp26
tmp28 = tmp27 * tmp4
tmp29 = tmp25 + tmp28
tmp30 = tmp7 - tmp22
tmp31 = tmp30 * tmp30
tmp32 = tmp31 * tmp8
tmp33 = tmp29 + tmp32
tmp34 = tmp11 - tmp22
tmp35 = tmp34 * tmp34
tmp36 = tmp35 * tmp12
tmp37 = tmp33 + tmp36
tmp38 = tmp17 - tmp18
tmp39 = triton_helpers.maximum(tmp38, tmp18)
tmp40 = triton_helpers.minimum(tmp39, tmp20)
tmp41 = tmp37 / tmp40
tmp42 = 1e-08
tmp43 = tmp41 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tl.store(out_ptr0 + x4, tmp22, xmask)
tl.store(in_out_ptr0 + x4, tmp44, xmask)
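# The kernel above fuses the whole statistics pass: the T=4 time axis is
# unrolled (tmp2..tmp14 accumulate x * mask, tmp15..tmp17 the mask count),
# both counts are clamped to [1.0, MAX_CNT], and sqrt(var + 1e-08) is written
# in place over the variance buffer while mu goes to out_ptr0.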
@triton.jit
def triton_poi_fused_add_clamp_div_sqrt_sub_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x5, tmp4, xmask)
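# The second kernel then applies the elementwise normalization
# y = (x - mu) / sigma, broadcasting the per-(batch, channel) statistics
# computed above across the time axis.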
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_clamp_div_mul_pow_sqrt_sub_sum_0[grid(256)](buf2,
arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_add_clamp_div_sqrt_sub_sum_1[grid(1024)](arg1_1,
buf0, buf2, buf3, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
del buf0
del buf2
return buf3,
class MaskedInstanceNorm1dNew(nn.Module):
"""Instance norm + masking."""
MAX_CNT = 100000.0
def __init__(self, d_channel: 'int', unbiased: 'bool'=True, affine:
'bool'=False):
super().__init__()
self.d_channel = d_channel
self.unbiased = unbiased
self.affine = affine
if self.affine:
gamma = torch.ones(d_channel, dtype=torch.float)
beta = torch.zeros_like(gamma)
self.register_parameter('gamma', nn.Parameter(gamma))
self.register_parameter('beta', nn.Parameter(beta))
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| carolmanderson/NeMo | MaskedInstanceNorm1d | false | 6,394 | [
"Apache-2.0"
] | 1 | be7114e2d983af751e1af4119465c626682747b7 | https://github.com/carolmanderson/NeMo/tree/be7114e2d983af751e1af4119465c626682747b7 | import torch
import torch.cuda
from torch import nn
import torch.utils.data
import torch.optim
class Model(nn.Module):
"""Instance norm + masking."""
MAX_CNT = 100000.0
def __init__(self, d_channel: 'int', unbiased: 'bool'=True, affine:
'bool'=False):
super().__init__()
self.d_channel = d_channel
self.unbiased = unbiased
self.affine = affine
if self.affine:
gamma = torch.ones(d_channel, dtype=torch.float)
beta = torch.zeros_like(gamma)
self.register_parameter('gamma', nn.Parameter(gamma))
self.register_parameter('beta', nn.Parameter(beta))
def forward(self, x: 'torch.Tensor', x_mask: 'torch.Tensor'
) ->torch.Tensor:
"""`x`: [B,C,T], `x_mask`: [B,T] => [B,C,T]."""
x_mask = x_mask.unsqueeze(1).type_as(x)
cnt = x_mask.sum(dim=-1, keepdim=True)
cnt_for_mu = cnt.clamp(1.0, self.MAX_CNT)
mu = (x * x_mask).sum(dim=-1, keepdim=True) / cnt_for_mu
sigma = (x - mu) ** 2
cnt_fot_sigma = (cnt - int(self.unbiased)).clamp(1.0, self.MAX_CNT)
sigma = (sigma * x_mask).sum(dim=-1, keepdim=True) / cnt_fot_sigma
sigma = (sigma + 1e-08).sqrt()
y = (x - mu) / sigma
if self.affine:
gamma = self.gamma.unsqueeze(0).unsqueeze(-1)
beta = self.beta.unsqueeze(0).unsqueeze(-1)
y = y * gamma + beta
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MaxPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2n/c2nhxfhhf43s4mzhl74kwmoffykfzkszr5r7wlrzxvvadgbylpje.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%permute, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp17 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
tmp21 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask)
tmp23 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp25 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
tmp27 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask)
tmp29 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 4, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import *
from torch import nn
class MaxPool2d(nn.Module):
def __init__(self, kernel_size, **kwargs):
super().__init__()
stride = kwargs.setdefault('stride', kernel_size)
padding = kwargs.setdefault('padding', 0)
dilation = kwargs.setdefault('dilation', 1)
return_indices = kwargs.setdefault('return_indices', False)
ceil_mode = kwargs.setdefault('ceil_mode', False)
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=
padding, dilation=dilation, return_indices=return_indices,
ceil_mode=ceil_mode)
def forward(self, x):
*batch, height, width, channels = x.shape
x = x.view(-1, height, width, channels)
x = torch.einsum('nhwc->nchw', [x])
x = self.pool(x)
x = torch.einsum('nchw->nhwc', [x])
_, new_height, new_width, _ = x.shape
x = x.contiguous()
x = x.view(*batch, new_height, new_width, channels)
return x
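# Usage sketch (shapes are illustrative, not taken from the repo): the wrapper
# pools channels-last input, so NHWC stays NHWC:
#
#   pool = MaxPool2d(kernel_size=2)
#   y = pool(torch.rand(5, 8, 8, 3))  # -> (5, 4, 4, 3)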
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from typing import *
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp25 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp29 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + x2, tmp30, xmask)
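# With a 4x4 input and kernel_size=4 the pooling window covers the entire
# spatial grid, so the kernel reduces all 16 values per (n, c) pair through 15
# pairwise maximums and emits a single output element.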
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 1, 1, 4), (4, 4, 4, 1), 0),
class MaxPool2dNew(nn.Module):
def __init__(self, kernel_size, **kwargs):
super().__init__()
stride = kwargs.setdefault('stride', kernel_size)
padding = kwargs.setdefault('padding', 0)
dilation = kwargs.setdefault('dilation', 1)
return_indices = kwargs.setdefault('return_indices', False)
ceil_mode = kwargs.setdefault('ceil_mode', False)
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=
padding, dilation=dilation, return_indices=return_indices,
ceil_mode=ceil_mode)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| cbarrick/csb | MaxPool2d | false | 6,395 | [
"MIT"
] | 1 | 0368036ddb7594c0b6e7cdc704aeec918786e58a | https://github.com/cbarrick/csb/tree/0368036ddb7594c0b6e7cdc704aeec918786e58a | import torch
from typing import *
from torch import nn
class Model(nn.Module):
def __init__(self, kernel_size, **kwargs):
super().__init__()
stride = kwargs.setdefault('stride', kernel_size)
padding = kwargs.setdefault('padding', 0)
dilation = kwargs.setdefault('dilation', 1)
return_indices = kwargs.setdefault('return_indices', False)
ceil_mode = kwargs.setdefault('ceil_mode', False)
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=
padding, dilation=dilation, return_indices=return_indices,
ceil_mode=ceil_mode)
def forward(self, x):
*batch, height, width, channels = x.shape
x = x.view(-1, height, width, channels)
x = torch.einsum('nhwc->nchw', [x])
x = self.pool(x)
x = torch.einsum('nchw->nhwc', [x])
_, new_height, new_width, _ = x.shape
x = x.contiguous()
x = x.view(*batch, new_height, new_width, channels)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
DeepNeuralNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_5 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
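# The fused kernel above serves every hidden layer of the MLP: it adds the
# layer bias, applies ReLU, and also stores the (x <= 0) mask that
# threshold_backward will reuse in autograd, which is why call() below
# launches the same kernel once per hidden Linear.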
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf18, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf17, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf16, 256, grid=grid(256), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf7, primals_9, buf15, 256, grid=grid(256), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf9, primals_11, buf14, 256, grid=grid(256), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf10 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf11, primals_13, buf13, 256, grid=grid(256), stream=stream0)
del primals_13
buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_15
return (reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(buf9, (64, 4), (4, 1), 0), reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_14, buf13, primals_12, buf14, primals_10, buf15, primals_8, buf16, primals_6, buf17, primals_4, buf18, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class DeepNeuralNet(torch.nn.Module):
"""
    A six-layer neural network; the default network for initializing
    the sigma and center parameters.
"""
def __init__(self, n_feature, n_hidden1, n_hidden2, n_hidden3,
n_hidden4, n_hidden5, n_hidden6, n_output):
"""
Initialization
        :param n_feature: number of input features
        :param n_hidden1..n_hidden6: number of neurons in each hidden layer
        :param n_output: number of outputs
"""
super(DeepNeuralNet, self).__init__()
self.fc1 = torch.nn.Linear(n_feature, n_hidden1)
self.fc2 = torch.nn.Linear(n_hidden1, n_hidden2)
self.fc3 = torch.nn.Linear(n_hidden2, n_hidden3)
self.fc4 = torch.nn.Linear(n_hidden3, n_hidden4)
self.fc5 = torch.nn.Linear(n_hidden4, n_hidden5)
self.fc6 = torch.nn.Linear(n_hidden5, n_hidden6)
self.predict = torch.nn.Linear(n_hidden6, n_output)
def forward(self, x):
x = torch.relu(self.fc1(x))
x = torch.relu(self.fc2(x))
x = torch.relu(self.fc3(x))
x = torch.relu(self.fc4(x))
x = torch.relu(self.fc5(x))
x = torch.relu(self.fc6(x))
x = self.predict(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_feature': 4, 'n_hidden1': 4, 'n_hidden2': 4,
'n_hidden3': 4, 'n_hidden4': 4, 'n_hidden5': 4, 'n_hidden6': 4,
'n_output': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
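# Fused bias-add + ReLU: adds the per-feature bias in place, clamps at zero,
# and stores the (activation <= 0) mask that threshold_backward consumes.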
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
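        # Seven stacked linear layers on a flattened (64, 4) view: six
        # mm + fused bias/ReLU rounds, then a final addmm with the bias
        # folded into the GEMM.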
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf18, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5,
primals_7, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf7,
primals_9, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf9,
primals_11, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf10
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf11,
primals_13, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf12 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf11, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_15
return (reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1),
0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(buf9, (64, 4), (4, 1), 0), reinterpret_tensor(
buf11, (64, 4), (4, 1), 0), primals_14, buf13, primals_12, buf14,
primals_10, buf15, primals_8, buf16, primals_6, buf17, primals_4, buf18
)
class DeepNeuralNetNew(torch.nn.Module):
"""
    A six-layer neural network; the default network for initializing
    the sigma and center parameters.
"""
def __init__(self, n_feature, n_hidden1, n_hidden2, n_hidden3,
n_hidden4, n_hidden5, n_hidden6, n_output):
"""
Initialization
        :param n_feature: number of input features
        :param n_hidden1..n_hidden6: number of neurons in each hidden layer
        :param n_output: number of outputs
"""
super(DeepNeuralNetNew, self).__init__()
self.fc1 = torch.nn.Linear(n_feature, n_hidden1)
self.fc2 = torch.nn.Linear(n_hidden1, n_hidden2)
self.fc3 = torch.nn.Linear(n_hidden2, n_hidden3)
self.fc4 = torch.nn.Linear(n_hidden3, n_hidden4)
self.fc5 = torch.nn.Linear(n_hidden4, n_hidden5)
self.fc6 = torch.nn.Linear(n_hidden5, n_hidden6)
self.predict = torch.nn.Linear(n_hidden6, n_output)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_12 = self.fc6.weight
primals_13 = self.fc6.bias
primals_14 = self.predict.weight
primals_15 = self.predict.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| cassberk/xps_peakfit | DeepNeuralNet | false | 6,396 | [
"MIT"
] | 1 | bbdd62dbfc4d64ec2af0c509361de81b0762bd41 | https://github.com/cassberk/xps_peakfit/tree/bbdd62dbfc4d64ec2af0c509361de81b0762bd41 | import torch
class Model(torch.nn.Module):
"""
    A six-layer neural network; the default network for initializing
    the sigma and center parameters.
"""
def __init__(self, n_feature, n_hidden1, n_hidden2, n_hidden3,
n_hidden4, n_hidden5, n_hidden6, n_output):
"""
Initialization
        :param n_feature: number of input features
        :param n_hidden1..n_hidden6: number of neurons in each hidden layer
        :param n_output: number of outputs
"""
super().__init__()
self.fc1 = torch.nn.Linear(n_feature, n_hidden1)
self.fc2 = torch.nn.Linear(n_hidden1, n_hidden2)
self.fc3 = torch.nn.Linear(n_hidden2, n_hidden3)
self.fc4 = torch.nn.Linear(n_hidden3, n_hidden4)
self.fc5 = torch.nn.Linear(n_hidden4, n_hidden5)
self.fc6 = torch.nn.Linear(n_hidden5, n_hidden6)
self.predict = torch.nn.Linear(n_hidden6, n_output)
def forward(self, x):
x = torch.relu(self.fc1(x))
x = torch.relu(self.fc2(x))
x = torch.relu(self.fc3(x))
x = torch.relu(self.fc4(x))
x = torch.relu(self.fc5(x))
x = torch.relu(self.fc6(x))
x = self.predict(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_feature': 4, 'n_hidden1': 4, 'n_hidden2': 4,
'n_hidden3': 4, 'n_hidden4': 4, 'n_hidden5': 4, 'n_hidden6': 4,
'n_output': 4}]
|
ConvReLUNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gy/cgylhqrp7uo6ulwwvwcaavkdj6lb3xbmk43eobqwwqruey33hhwc.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, clone, rsqrt, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + (x2), tmp13, xmask)
tl.store(out_ptr1 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pa/cpaccjq76xvjux47qgs7wy4lqiq65radyhdwdqtrvh44sdzkani6.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y3), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2 + (4*y3)), tmp10, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_4, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del buf2
del buf3
del primals_5
return (reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.cuda
import torch.utils.data
import torch.optim
class ConvReLUNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNorm, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, padding=kernel_size // 2)
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal):
out = torch.nn.functional.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2)).transpose(1, 2)
return self.dropout(out)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.cuda
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
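# Post-convolution bias add: in_ptr0 holds the per-channel conv bias,
# indexed by the channel component of the flattened output index.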
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
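# LayerNorm pass 1: ReLU the four channel values at each (batch, position),
# then compute their mean and rsqrt(var + 1e-05) for the channel reduction.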
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x2, tmp13, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
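# LayerNorm pass 2: re-apply ReLU, normalize with the precomputed statistics,
# and apply the elementwise affine transform (gamma in in_ptr3, beta in in_ptr4).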
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
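        # Conv1d runs through the extern convolution kernel; the bias add
        # and the two LayerNorm passes run as the Triton kernels above.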
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](buf1, buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(16, 4)](buf1, buf2, buf3,
primals_4, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=16,
num_warps=1, num_stages=1)
del buf2
del buf3
del primals_5
return reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0
), primals_1, primals_3, primals_4, buf1
class ConvReLUNormNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNormNew, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, padding=kernel_size // 2)
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.norm.weight
primals_5 = self.norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| carolmanderson/NeMo | ConvReLUNorm | false | 6,397 | [
"Apache-2.0"
] | 1 | be7114e2d983af751e1af4119465c626682747b7 | https://github.com/carolmanderson/NeMo/tree/be7114e2d983af751e1af4119465c626682747b7 | import torch
import torch.cuda
import torch.utils.data
import torch.optim
class Model(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super().__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, padding=kernel_size // 2)
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal):
out = torch.nn.functional.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2)).transpose(1, 2)
return self.dropout(out)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SineLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ej/cejzhnnynxtkiot2qt7feea4bkwhxo5g2qmtwe2jbyvjefkkzt6m.py
# Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin]
# Source node to ATen node mapping:
# mul => mul
# sin => sin
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 30), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul,), kwargs = {})
triton_poi_fused_mul_sin_0 = async_compile.triton('triton_poi_fused_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sin_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class SineLayer(nn.Module):
def __init__(self, in_features, out_features, bias=True, is_first=False,
omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.init_weights()
def init_weights(self):
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self
.in_features)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, input):
return torch.sin(self.omega_0 * self.linear(input))
def forward_with_intermediate(self, input):
intermediate = self.omega_0 * self.linear(input)
return torch.sin(intermediate), intermediate
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
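# SIREN activation: sin(omega_0 * x) with the default omega_0 = 30 baked
# into the kernel as a compile-time constant.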
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 30.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
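        # One addmm (linear layer with fused bias) followed by the
        # elementwise sin kernel.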
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class SineLayerNew(nn.Module):
def __init__(self, in_features, out_features, bias=True, is_first=False,
omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.init_weights()
def init_weights(self):
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self
.in_features)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward_with_intermediate(self, input):
intermediate = self.omega_0 * self.linear(input)
return torch.sin(intermediate), intermediate
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ccxiaotoancai/Anim-NeRF | SineLayer | false | 6,398 | [
"MIT"
] | 1 | 1342a9e2d02411a09acecac40ac325f38708b9c9 | https://github.com/ccxiaotoancai/Anim-NeRF/tree/1342a9e2d02411a09acecac40ac325f38708b9c9 | import torch
import numpy as np
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_features, out_features, bias=True, is_first=False,
omega_0=30):
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.init_weights()
def init_weights(self):
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self
.in_features)
else:
self.linear.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, input):
return torch.sin(self.omega_0 * self.linear(input))
def forward_with_intermediate(self, input):
intermediate = self.omega_0 * self.linear(input)
return torch.sin(intermediate), intermediate
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Generator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q5/cq52p2qap7uob2ddnn4qeh67r3muutkp3yhbkqpu4eqaemol3idl.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_2 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Generator(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.sigmoid(self.fc3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
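# Fused bias-add + ReLU; also emits the (out <= 0) mask saved for backward.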
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
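# Fused bias-add + sigmoid applied in place to the final layer's output.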
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_sigmoid_1[grid(256)](buf5, primals_7, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf5, primals_6, buf6, primals_4, buf7
class GeneratorNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, output_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
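# --- Illustrative parity check (not part of the original file) ---
# A minimal CUDA-only sketch: the compiled GeneratorNew should match an eager
# relu/relu/sigmoid stack built from the same weights. The helper name is
# hypothetical and a CUDA device is assumed, since call() allocates on cuda:0.
def _check_generator_parity():
    model = GeneratorNew(4, 4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y_eager = torch.sigmoid(model.fc3(torch.relu(model.fc2(torch.relu(
        model.fc1(x))))))
    torch.testing.assert_close(model(x), y_eager)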
| cclaypool/pytorch-dcgan | Generator | false | 6,399 | [
"MIT"
] | 1 | a2096daf7bb75bf95e189bb3d2f820c51147b61c | https://github.com/cclaypool/pytorch-dcgan/tree/a2096daf7bb75bf95e189bb3d2f820c51147b61c | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
        x = torch.sigmoid(self.fc3(x))  # F.sigmoid is deprecated; torch.sigmoid is equivalent
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Generator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ck/cck6zsxedo53nyj2po2pvkfjvrr75ansuu3rjjhu6zyrx6xzssqo.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# output => expm1, gt, mul, mul_2, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.elu]
triton_poi_fused_elu_0.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
def __init__(self, dim, hidden_dim, y_dim, sigma=0.02):
super(Generator, self).__init__()
input_dim = dim
hidden_size = hidden_dim
self.fc1 = nn.Linear(input_dim, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, y_dim)
nn.init.normal_(self.fc1.weight, std=sigma)
nn.init.constant_(self.fc1.bias, 0)
nn.init.normal_(self.fc2.weight, std=sigma)
nn.init.constant_(self.fc2.bias, 0)
nn.init.normal_(self.fc3.weight, std=sigma)
nn.init.constant_(self.fc3.bias, 0)
def forward(self, noise):
gen_input = noise
output = F.elu(self.fc1(gen_input))
output = F.elu(self.fc2(output))
output = self.fc3(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'hidden_dim': 4, 'y_dim': 4}]
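# --- Illustrative usage (not part of the original repo) ---
# A minimal sketch: all three Linear layers start with N(0, sigma^2) weights
# and zero biases, so a freshly constructed generator maps noise to values
# near zero. The helper name below is an invention of this note.
def _example_generator_usage():
    gen = Generator(dim=4, hidden_dim=4, y_dim=4, sigma=0.02)
    noise = torch.rand(4, 4, 4, 4)
    out = gen(noise)
    assert out.shape == (4, 4, 4, 4)  # fc3 has no output activation
    return out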
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
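# --- Illustrative reference check (not part of the original file) ---
# A minimal CUDA sketch, assuming a CUDA device: the kernel above is
# elementwise ELU with alpha=1, i.e. where(x > 0, x, expm1(x)). The launch
# parameters mirror the call() below; the helper name is hypothetical.
def _check_fused_elu_kernel():
    x = torch.randn(256, device='cuda')
    out = torch.empty_like(x)
    triton_poi_fused_elu_0[grid(256)](x, out, 256, XBLOCK=128,
        num_warps=4, num_stages=1)
    torch.testing.assert_close(out, torch.nn.functional.elu(x))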
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_elu_0[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf2, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), primals_6, primals_4
class GeneratorNew(nn.Module):
def __init__(self, dim, hidden_dim, y_dim, sigma=0.02):
super(GeneratorNew, self).__init__()
input_dim = dim
hidden_size = hidden_dim
self.fc1 = nn.Linear(input_dim, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, y_dim)
nn.init.normal_(self.fc1.weight, std=sigma)
nn.init.constant_(self.fc1.bias, 0)
nn.init.normal_(self.fc2.weight, std=sigma)
nn.init.constant_(self.fc2.bias, 0)
nn.init.normal_(self.fc3.weight, std=sigma)
nn.init.constant_(self.fc3.bias, 0)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
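# --- Illustrative parity check (not part of the original file) ---
# A minimal CUDA-only sketch: the compiled forward should agree with the
# eager elu/elu/linear stack built from the same parameters. The helper
# name is hypothetical.
def _check_generator_parity():
    model = GeneratorNew(4, 4, 4).cuda()
    noise = torch.rand(4, 4, 4, 4, device='cuda')
    elu = nn.functional.elu
    y_eager = model.fc3(elu(model.fc2(elu(model.fc1(noise)))))
    torch.testing.assert_close(model(noise), y_eager)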
| ccha23/miml | Generator | false | 6,400 | [
"MIT"
] | 1 | 6a41de1c0bb41d38e3cdc6e9c27363215b7729b9 | https://github.com/ccha23/miml/tree/6a41de1c0bb41d38e3cdc6e9c27363215b7729b9 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, dim, hidden_dim, y_dim, sigma=0.02):
super().__init__()
input_dim = dim
hidden_size = hidden_dim
self.fc1 = nn.Linear(input_dim, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, y_dim)
nn.init.normal_(self.fc1.weight, std=sigma)
nn.init.constant_(self.fc1.bias, 0)
nn.init.normal_(self.fc2.weight, std=sigma)
nn.init.constant_(self.fc2.bias, 0)
nn.init.normal_(self.fc3.weight, std=sigma)
nn.init.constant_(self.fc3.bias, 0)
def forward(self, noise):
gen_input = noise
output = F.elu(self.fc1(gen_input))
output = F.elu(self.fc2(output))
output = self.fc3(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
StochasticPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/l3/cl3qgtljwm55hj7prrlq32vnxhqj5elf2qeptwkrprrhumnm7twn.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# output => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [2, 2], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
class StochasticPool2d(torch.nn.Module):
def __init__(self, kernel_size=2, stride=2, padding=0):
super(StochasticPool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
        self.padding = padding
        self.grid_size = kernel_size
        # note: the scalar padding attribute above is immediately shadowed
        # by the ConstantPad2d module below
        self.padding = torch.nn.ConstantPad2d((0, 1, 0, 1), 0)
def forward(self, x, s3pool_flag=False):
if s3pool_flag or self.training:
h, w = x.shape[-2:]
n_h = h // self.grid_size
n_w = w // self.grid_size
n_h = int(n_h)
n_w = int(n_w)
x = self.padding(x)
x = F.max_pool2d(x, self.kernel_size, 1)
w_indices = []
h_indices = []
for i in range(n_w):
position_offset = self.grid_size * i
if i + 1 < n_w:
max_range = self.grid_size
else:
max_range = w - position_offset
if not self.training:
w_index = torch.LongTensor([0])
else:
w_index = torch.LongTensor(1).random_(0, max_range)
w_indices.append(torch.add(w_index, position_offset))
for j in range(n_h):
position_offset = self.grid_size * j
if j + 1 < n_h:
max_range = self.grid_size
else:
max_range = h - position_offset
if not self.training:
h_index = torch.LongTensor([0])
else:
h_index = torch.LongTensor(1).random_(0, max_range)
h_indices.append(torch.add(h_index, position_offset))
h_indices = torch.cat(h_indices, dim=0)
w_indices = torch.cat(w_indices, dim=0)
output = x[:, :, h_indices][:, :, :, w_indices]
else:
output = F.avg_pool2d(x, self.kernel_size, self.stride)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
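# --- Illustrative usage (not part of the original repo) ---
# A minimal sketch of the two code paths: in training (or with
# s3pool_flag=True) the module max-pools with stride 1 and then samples one
# row and one column index per grid cell, S3Pool-style; in eval it falls
# back to plain average pooling. Both paths halve the spatial size here.
# The helper name below is an invention of this note.
def _example_stochastic_pool():
    pool = StochasticPool2d(kernel_size=2, stride=2)
    x = torch.rand(4, 4, 4, 4)
    pool.train()
    y_train = pool(x)  # stochastic: depends on the sampled indices
    pool.eval()
    y_eval = pool(x)   # deterministic average pooling
    assert y_train.shape == y_eval.shape == (4, 4, 2, 2)
    return y_train, y_eval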
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
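# --- Illustrative reference check (not part of the original file) ---
# A minimal CUDA sketch, assuming a CUDA device: the kernel above hard-codes
# a 2x2, stride-2 average pool over a contiguous (4, 4, 4, 4) input (each
# output is 0.25 times the sum of a 2x2 window). The helper name is
# hypothetical.
def _check_avg_pool_kernel():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = torch.empty((4, 4, 2, 2), device='cuda')
    triton_poi_fused_avg_pool2d_0[grid(64)](x, out, 64, XBLOCK=64,
        num_warps=1, num_stages=1)
    torch.testing.assert_close(out, torch.nn.functional.avg_pool2d(x, 2, 2))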
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class StochasticPool2dNew(torch.nn.Module):
def __init__(self, kernel_size=2, stride=2, padding=0):
super(StochasticPool2dNew, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.grid_size = kernel_size
self.padding = torch.nn.ConstantPad2d((0, 1, 0, 1), 0)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
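# --- Illustrative caveat (not part of the original file) ---
# The graph above was traced from the eval/avg-pool branch only, so
# StochasticPool2dNew always average-pools regardless of train()/eval().
# A minimal CUDA sketch making that explicit; the helper name is hypothetical.
def _check_eval_path_only():
    new = StochasticPool2dNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    new.train()  # no effect: the compiled path is always the avg-pool branch
    torch.testing.assert_close(new(x), torch.nn.functional.avg_pool2d(x, 2, 2))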
| cclauss/DL4AGX | StochasticPool2d | false | 6,401 | [
"Apache-2.0"
] | 1 | b4d73f6c39b0428e32ce5656352800cc7e2cfb22 | https://github.com/cclauss/DL4AGX/tree/b4d73f6c39b0428e32ce5656352800cc7e2cfb22 | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, kernel_size=2, stride=2, padding=0):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.grid_size = kernel_size
self.padding = torch.nn.ConstantPad2d((0, 1, 0, 1), 0)
def forward(self, x, s3pool_flag=False):
if s3pool_flag or self.training:
h, w = x.shape[-2:]
n_h = h // self.grid_size
n_w = w // self.grid_size
n_h = int(n_h)
n_w = int(n_w)
x = self.padding(x)
x = F.max_pool2d(x, self.kernel_size, 1)
w_indices = []
h_indices = []
for i in range(n_w):
position_offset = self.grid_size * i
if i + 1 < n_w:
max_range = self.grid_size
else:
max_range = w - position_offset
if not self.training:
w_index = torch.LongTensor([0])
else:
w_index = torch.LongTensor(1).random_(0, max_range)
w_indices.append(torch.add(w_index, position_offset))
for j in range(n_h):
position_offset = self.grid_size * j
if j + 1 < n_h:
max_range = self.grid_size
else:
max_range = h - position_offset
if not self.training:
h_index = torch.LongTensor([0])
else:
h_index = torch.LongTensor(1).random_(0, max_range)
h_indices.append(torch.add(h_index, position_offset))
h_indices = torch.cat(h_indices, dim=0)
w_indices = torch.cat(w_indices, dim=0)
output = x[:, :, h_indices][:, :, :, w_indices]
else:
output = F.avg_pool2d(x, self.kernel_size, self.stride)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GKDLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mc/cmc44gqwlbgitm3uqkuiwz6fe3jirwculg7zmyndeuqzyyqzyok7.py
# Topologically Sorted Source Nodes: [tea_pred_softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# tea_pred_softmax => exp_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/v4/cv4nyn2kde7dd2c53ddahw4vtxyldln6pqt62jrliqindkf3sj5m.py
# Topologically Sorted Source Nodes: [tea_pred_softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# tea_pred_softmax => div_2, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/i3/ci3g6y7l6z45tebcq4v5jxzmexsuntoauq24chhtyzomwouzqp64.py
# Topologically Sorted Source Nodes: [tea_pred_argmax], Original ATen: [aten.argmax]
# Source node to ATen node mapping:
# tea_pred_argmax => argmax
# Graph fragment:
# %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%div_2, 1), kwargs = {})
triton_poi_fused_argmax_2 = async_compile.triton('triton_poi_fused_argmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_argmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp32 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + (x2), tmp46, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ks/cksyly2qv4vk7pasmoqo7pjuted5hixm5lctyhktlble3pvwtuuw.py
# Topologically Sorted Source Nodes: [eq, mask, eq_1], Original ATen: [aten.eq, aten._to_copy]
# Source node to ATen node mapping:
# eq => eq
# eq_1 => eq_1
# mask => convert_element_type
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%arg2_1, %argmax), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%convert_element_type, 1), kwargs = {})
triton_poi_fused__to_copy_eq_3 = async_compile.triton('triton_poi_fused__to_copy_eq_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_eq_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_eq_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = 1.0
tmp6 = tmp4 == tmp5
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3b/c3bogmylyojsxbti2b7jdh743s3muup2ij2g27sm562ymmmxy2oh.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {})
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tf/ctf2r57wos4uu4r56cwmykmbu5vg7nhhthl2jx7lq6uknsb6l7oc.py
# Topologically Sorted Source Nodes: [stu_pred_log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# stu_pred_log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {})
triton_poi_fused__log_softmax_5 = async_compile.triton('triton_poi_fused__log_softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tea_pred_softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tea_pred_softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [tea_pred_argmax], Original ATen: [aten.argmax]
triton_poi_fused_argmax_2.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf0; del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq, mask, eq_1], Original ATen: [aten.eq, aten._to_copy]
triton_poi_fused__to_copy_eq_3.run(arg2_1, buf2, buf3, buf4, 256, grid=grid(256), stream=stream0)
del arg2_1
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(arg0_1, buf5, 256, grid=grid(256), stream=stream0)
del arg0_1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [stu_pred_log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_5.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
return (buf3, buf4, buf6, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.nn.functional as F
class GKDLoss(nn.Module):
"""Knowledge Distillation Loss"""
def __init__(self, T):
super().__init__()
self.t = T
def forward(self, stu_pred, tea_pred, label):
stu_pred_log_softmax = F.log_softmax(stu_pred / self.t, dim=1)
tea_pred_softmax = F.softmax(tea_pred / self.t, dim=1)
tea_pred_argmax = torch.argmax(tea_pred_softmax, dim=1)
mask = torch.eq(label, tea_pred_argmax).float()
        # number of positions where the teacher's argmax agrees with the
        # label; the division below assumes count > 0
        count = mask[mask == 1].size(0)
mask = mask.unsqueeze(-1)
only_correct_sample_stu_pred_log_softmax = stu_pred_log_softmax.mul(
mask)
only_correct_sample_tea_pred_softmax = tea_pred_softmax.mul(mask)
only_correct_sample_tea_pred_softmax[
only_correct_sample_tea_pred_softmax == 0.0] = 1.0
loss = F.kl_div(only_correct_sample_stu_pred_log_softmax,
only_correct_sample_tea_pred_softmax, reduction='sum'
) * self.t ** 2 / count
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
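# --- Illustrative worked example (not part of the original repo) ---
# A minimal sketch of the masking idea: positions where the teacher's argmax
# agrees with the label keep their KL term; elsewhere both sides are zeroed
# and the teacher side is reset to 1.0, so target * (log(target) - input)
# contributes 1 * (log 1 - 0) = 0 there. Labels are built to agree with the
# teacher so that count > 0 (with disagreeing labels the division by count
# degenerates). The helper name below is an invention of this note.
def _example_gkd_loss():
    torch.manual_seed(0)
    criterion = GKDLoss(T=4)
    stu = torch.rand(4, 4, 4, 4)
    tea = torch.rand(4, 4, 4, 4)
    label = torch.argmax(F.softmax(tea / 4, dim=1), dim=1).float()
    loss = criterion(stu, tea, label)
    assert torch.isfinite(loss)
    return loss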
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_argmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x2, tmp46, xmask)
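# --- Illustrative reference check (not part of the original file) ---
# A minimal CUDA sketch, assuming a CUDA device: the comparison chain above
# is a NaN-aware argmax over dim 1 of a contiguous (4, 4, 4, 4) tensor,
# keeping the lowest index on ties, which matches torch.argmax. The helper
# name is hypothetical.
def _check_argmax_kernel():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = torch.empty((4, 4, 4), dtype=torch.int64, device='cuda')
    triton_poi_fused_argmax_2[grid(64)](x, out, 64, XBLOCK=64,
        num_warps=1, num_stages=1)
    torch.testing.assert_close(out, torch.argmax(x, dim=1))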
@triton.jit
def triton_poi_fused__to_copy_eq_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1.to(tl.float32)
tmp3 = tmp0 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = 1.0
tmp6 = tmp4 == tmp5
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused__log_softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused_argmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf0
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__to_copy_eq_3[grid(256)](arg2_1, buf2, buf3, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg2_1
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_4[grid(256)](arg0_1, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_5[grid(256)](buf5, buf6, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf5
return buf3, buf4, buf6, buf1
class GKDLossNew(nn.Module):
"""Knowledge Distillation Loss"""
def __init__(self, T):
super().__init__()
self.t = T
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| carol007/pytorch-ImageNet-CIFAR-COCO-VOC-training | GKDLoss | false | 6,402 | ["MIT"] | 1 | e8b37046e6fbe914f6a68bbde1fe419c46373c1d | https://github.com/carol007/pytorch-ImageNet-CIFAR-COCO-VOC-training/tree/e8b37046e6fbe914f6a68bbde1fe419c46373c1d | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.nn.functional as F
class Model(nn.Module):
"""Knowledge Distillation Loss"""
def __init__(self, T):
super().__init__()
self.t = T
def forward(self, stu_pred, tea_pred, label):
stu_pred_log_softmax = F.log_softmax(stu_pred / self.t, dim=1)
tea_pred_softmax = F.softmax(tea_pred / self.t, dim=1)
tea_pred_argmax = torch.argmax(tea_pred_softmax, dim=1)
mask = torch.eq(label, tea_pred_argmax).float()
count = mask[mask == 1].size(0)
mask = mask.unsqueeze(-1)
only_correct_sample_stu_pred_log_softmax = stu_pred_log_softmax.mul(
mask)
only_correct_sample_tea_pred_softmax = tea_pred_softmax.mul(mask)
only_correct_sample_tea_pred_softmax[
only_correct_sample_tea_pred_softmax == 0.0] = 1.0
loss = F.kl_div(only_correct_sample_stu_pred_log_softmax,
only_correct_sample_tea_pred_softmax, reduction='sum'
) * self.t ** 2 / count
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4]
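# Added sketch (not from the source repo): a minimal eager-mode smoke test.
# With fully random labels the "correct" count can be zero (yielding an inf
# loss), so the teacher's own argmax is used as the label here; T=4 matches
# get_init_inputs().
if __name__ == "__main__":
    loss_fn = Model(*get_init_inputs())
    stu, tea, _ = get_inputs()
    label = torch.argmax(F.softmax(tea / 4, dim=1), dim=1).float()
    print(float(loss_fn(stu, tea, label)))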
|
makeStyle | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py
# Topologically Sorted Source Nodes: [style], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# style => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2d/c2dxxe3mbggak6huy3dpyor6kge4bsndzz6nxnksovqavwda6k3i.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, pow_2, style_2], Original ATen: [aten.pow, aten.sum, aten.div]
# Source node to ATen node mapping:
# pow_1 => pow_1
# pow_2 => pow_2
# style_2 => div
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %pow_2), kwargs = {})
triton_poi_fused_div_pow_sum_1 = async_compile.triton('triton_poi_fused_div_pow_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_pow_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_pow_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [style], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1, pow_2, style_2], Original ATen: [aten.pow, aten.sum, aten.div]
triton_poi_fused_div_pow_sum_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class makeStyle(nn.Module):
def __init__(self):
super().__init__()
self.flatten = nn.Flatten()
def forward(self, x0):
style = F.avg_pool2d(x0, kernel_size=(x0.shape[-2], x0.shape[-1]))
style = self.flatten(style)
style = style / torch.sum(style ** 2, axis=1, keepdim=True) ** 0.5
return style
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
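# Global 4x4 average pool: sums all 16 spatial values of each
# (batch, channel) plane and scales by 1/16 (0.0625).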
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
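# Row-wise L2 normalization of the flattened style vector: each length-4 row
# is divided by the square root of its sum of squares.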
@triton.jit
def triton_poi_fused_div_pow_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_pow_sum_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf1,
class makeStyleNew(nn.Module):
def __init__(self):
super().__init__()
self.flatten = nn.Flatten()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| cellimnet/scellseg-publish | makeStyle | false | 6,403 | ["BSD-3-Clause"] | 1 | 03bfbae11fedcf430c40419c9afadf55cbd3034d | https://github.com/cellimnet/scellseg-publish/tree/03bfbae11fedcf430c40419c9afadf55cbd3034d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.flatten = nn.Flatten()
def forward(self, x0):
style = F.avg_pool2d(x0, kernel_size=(x0.shape[-2], x0.shape[-1]))
style = self.flatten(style)
style = style / torch.sum(style ** 2, axis=1, keepdim=True) ** 0.5
return style
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
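# Added sketch (not from the source repo): the style vector is a global
# average pool, flattened and L2-normalized, so every output row should
# have (near-)unit norm.
if __name__ == "__main__":
    style = Model()(torch.rand([4, 4, 4, 4]))
    print(style.shape, torch.sum(style ** 2, dim=1))  # (4, 4), rows ~= 1.0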
|
LocalMLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6e/c6etrnrwnqlbwohg25wyqa2rkojloi6lu4ex24dati4k5ok6ywe4.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.native_layer_norm, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# x_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_native_layer_norm_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + (x2), tmp10, xmask)
tl.store(out_ptr1 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.native_layer_norm, aten.relu, aten.threshold_backward]
triton_poi_fused_native_layer_norm_relu_threshold_backward_1.run(buf0, buf1, buf2, primals_3, primals_4, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_4
return (buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class LocalMLP(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""forward of the module
:param x: input tensor (..., dim_in)
:type x: torch.Tensor
:return: output tensor (..., dim_in)
:rtype: torch.Tensor
"""
x = self.linear(x)
if hasattr(self, 'norm'):
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
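# LayerNorm statistics over the trailing dim (size 4): writes the per-row
# mean and rsqrt(var + 1e-05) for reuse by the fused normalize/ReLU kernel.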
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
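# Applies the affine LayerNorm transform and ReLU, and also records the
# (out <= 0) bool mask that threshold_backward consumes in the backward pass.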
@triton.jit
def triton_poi_fused_native_layer_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_native_layer_norm_relu_threshold_backward_1[grid(256)
](buf0, buf1, buf2, primals_3, primals_4, buf3, buf4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf2
del primals_4
return buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, buf4
class LocalMLPNew(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_3 = self.norm.weight
primals_4 = self.norm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| cdicle-motional/l5kit | LocalMLP | false | 6,404 | ["Apache-2.0"] | 1 | 4dc4ee5391479bb71f0b373f39c316f9eef5a961 | https://github.com/cdicle-motional/l5kit/tree/4dc4ee5391479bb71f0b373f39c316f9eef5a961 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""forward of the module
:param x: input tensor (..., dim_in)
:type x: torch.Tensor
:return: output tensor (..., dim_in)
:rtype: torch.Tensor
"""
x = self.linear(x)
if hasattr(self, 'norm'):
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
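# Added sketch (not from the source repo): with use_norm=True the Linear
# layer drops its own bias (LayerNorm supplies one) and the result is
# ReLU-clamped, so every output entry is >= 0.
if __name__ == "__main__":
    mlp = Model(*get_init_inputs())
    out = mlp(get_inputs()[0])
    print(out.shape, bool((out >= 0).all()))  # torch.Size([4, 4, 4, 4]), True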
|
MV_Softmax | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rs/crsnx7eirymotgjb4bge2rmwh7poptokmoairgsn2lw2j2jyhhbp.py
# Topologically Sorted Source Nodes: [kernel_norm], Original ATen: [aten.div]
# Source node to ATen node mapping:
# kernel_norm => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kq/ckq7yos7pu4pdqbldmezq4qzzp4smb3obbexm5mkgz2idbxyhosy.py
# Topologically Sorted Source Nodes: [arange], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# arange => iota_default
# Graph fragment:
# %iota_default : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_1 = async_compile.triton('triton_poi_fused_arange_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qf/cqfmxqww7ne5njgnlptwncplmpnrtbiyhipigmlsja2qukzwcula.py
# Topologically Sorted Source Nodes: [sub, mask], Original ATen: [aten.sub, aten.gt]
# Source node to ATen node mapping:
# mask => gt
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, 0.35), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%mm, %sub), kwargs = {})
triton_poi_fused_gt_sub_2 = async_compile.triton('triton_poi_fused_gt_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp5 < 4")
tmp7 = tl.load(in_ptr0 + (tmp5 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = 0.35
tmp9 = tmp7 - tmp8
tmp10 = tmp0 > tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dj/cdjienvpl2qfso5gijmaya2ae5geu45rg527txw5g26wuc6ibt3z.py
# Topologically Sorted Source Nodes: [sub, gt_2, final_gt], Original ATen: [aten.sub, aten.gt, aten.where]
# Source node to ATen node mapping:
# final_gt => where
# gt_2 => gt_1
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, 0.35), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view, 0.35), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %sub, %view), kwargs = {})
triton_poi_fused_gt_sub_where_3 = async_compile.triton('triton_poi_fused_gt_sub_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_sub_where_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_sub_where_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = 0.35
tmp8 = tmp6 > tmp7
tmp9 = tmp6 - tmp7
tmp10 = tl.where(tmp8, tmp9, tmp6)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [kernel_norm], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [kernel_norm, cos_theta], Original ATen: [aten.div, aten.mm]
extern_kernels.mm(primals_2, buf0, out=buf1)
del buf0
buf2 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [arange], Original ATen: [aten.arange]
triton_poi_fused_arange_1.run(buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [sub, mask], Original ATen: [aten.sub, aten.gt]
triton_poi_fused_gt_sub_2.run(buf1, primals_3, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, gt_2, final_gt], Original ATen: [aten.sub, aten.gt, aten.where]
triton_poi_fused_gt_sub_where_3.run(primals_3, buf1, buf4, buf5, 4, grid=grid(4), stream=stream0)
return (buf1, buf3, buf5, primals_1, primals_3, buf2, buf4, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import math
import torch
from torch.nn import functional as F
import torch._utils
from torch.nn import Parameter
from itertools import product as product
import torch.utils.data.distributed
class MV_Softmax(Module):
"""Implementation for "Mis-classified Vector Guided Softmax Loss for Face Recognition"
"""
def __init__(self, feat_dim, num_class, is_am, margin=0.35, mv_weight=
1.12, scale=32):
super(MV_Softmax, self).__init__()
self.weight = Parameter(torch.Tensor(feat_dim, num_class))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
self.margin = margin
self.mv_weight = mv_weight
self.scale = scale
self.is_am = is_am
self.cos_m = math.cos(margin)
self.sin_m = math.sin(margin)
self.threshold = math.cos(math.pi - margin)
self.mm = self.sin_m * margin
def forward(self, x, label):
kernel_norm = F.normalize(self.weight, dim=0)
cos_theta = torch.mm(x, kernel_norm)
batch_size = label.size(0)
gt = cos_theta[torch.arange(0, batch_size), label].view(-1, 1)
if self.is_am:
mask = cos_theta > gt - self.margin
final_gt = torch.where(gt > self.margin, gt - self.margin, gt)
else:
sin_theta = torch.sqrt(1.0 - torch.pow(gt, 2))
cos_theta_m = gt * self.cos_m - sin_theta * self.sin_m
mask = cos_theta > cos_theta_m
final_gt = torch.where(gt > 0.0, cos_theta_m, gt)
hard_example = cos_theta[mask]
cos_theta[mask] = self.mv_weight * hard_example + self.mv_weight - 1.0
cos_theta.scatter_(1, label.data.view(-1, 1), final_gt)
cos_theta *= self.scale
return cos_theta
def get_inputs():
return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'feat_dim': 4, 'num_class': 4, 'is_am': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import math
import torch._utils
from torch.nn import Parameter
from itertools import product as product
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
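# Column-wise L2 normalization of the 4x4 weight, matching
# F.normalize(weight, dim=0) with its 1e-12 clamp on the norm.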
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_arange_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
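# Hard-example mask for the AM branch: cos_theta > gt - margin, where gt is
# the per-row ground-truth cosine gathered via the label index (margin 0.35).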
@triton.jit
def triton_poi_fused_gt_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr0 + (tmp5 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp8 = 0.35
tmp9 = tmp7 - tmp8
tmp10 = tmp0 > tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
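# Margin-adjusted target logit for the AM branch: gt - 0.35 where gt > 0.35,
# otherwise gt unchanged; the comparison mask is stored alongside.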
@triton.jit
def triton_poi_fused_gt_sub_where_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp7 = 0.35
tmp8 = tmp6 > tmp7
tmp9 = tmp6 - tmp7
tmp10 = tl.where(tmp8, tmp9, tmp6)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, buf0, out=buf1)
del buf0
buf2 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_arange_1[grid(4)](buf2, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_gt_sub_2[grid(16)](buf1, primals_3, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused_gt_sub_where_3[grid(4)](primals_3, buf1, buf4,
buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
return (buf1, buf3, buf5, primals_1, primals_3, buf2, buf4,
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0))
class MV_SoftmaxNew(Module):
"""Implementation for "Mis-classified Vector Guided Softmax Loss for Face Recognition"
"""
def __init__(self, feat_dim, num_class, is_am, margin=0.35, mv_weight=
1.12, scale=32):
super(MV_SoftmaxNew, self).__init__()
self.weight = Parameter(torch.Tensor(feat_dim, num_class))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
self.margin = margin
self.mv_weight = mv_weight
self.scale = scale
self.is_am = is_am
self.cos_m = math.cos(margin)
self.sin_m = math.sin(margin)
self.threshold = math.cos(math.pi - margin)
self.mm = self.sin_m * margin
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| cavalleria/FaceX-Zoo | MV_Softmax | false | 6,405 | ["Apache-2.0"] | 1 | c4bf8924f1858928f8cf83efabf8ad237c67f620 | https://github.com/cavalleria/FaceX-Zoo/tree/c4bf8924f1858928f8cf83efabf8ad237c67f620 | from torch.nn import Module
import math
import torch
from torch.nn import functional as F
import torch._utils
from torch.nn import Parameter
from itertools import product as product
import torch.utils.data.distributed
class Model(Module):
"""Implementation for "Mis-classified Vector Guided Softmax Loss for Face Recognition"
"""
def __init__(self, feat_dim, num_class, is_am, margin=0.35, mv_weight=
1.12, scale=32):
super().__init__()
self.weight = Parameter(torch.Tensor(feat_dim, num_class))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
self.margin = margin
self.mv_weight = mv_weight
self.scale = scale
self.is_am = is_am
self.cos_m = math.cos(margin)
self.sin_m = math.sin(margin)
self.threshold = math.cos(math.pi - margin)
self.mm = self.sin_m * margin
def forward(self, x, label):
kernel_norm = F.normalize(self.weight, dim=0)
cos_theta = torch.mm(x, kernel_norm)
batch_size = label.size(0)
gt = cos_theta[torch.arange(0, batch_size), label].view(-1, 1)
if self.is_am:
mask = cos_theta > gt - self.margin
final_gt = torch.where(gt > self.margin, gt - self.margin, gt)
else:
sin_theta = torch.sqrt(1.0 - torch.pow(gt, 2))
cos_theta_m = gt * self.cos_m - sin_theta * self.sin_m
mask = cos_theta > cos_theta_m
final_gt = torch.where(gt > 0.0, cos_theta_m, gt)
hard_example = cos_theta[mask]
cos_theta[mask] = self.mv_weight * hard_example + self.mv_weight - 1.0
cos_theta.scatter_(1, label.data.view(-1, 1), final_gt)
cos_theta *= self.scale
return cos_theta
def get_inputs():
return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [4, 4, 4]
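# Added sketch (not from the source repo): labels must be int64 class
# indices in [0, num_class); is_am=4 from get_init_inputs() is truthy, so
# the additive-margin (AM-Softmax) branch is exercised.
if __name__ == "__main__":
    head = Model(*get_init_inputs())
    logits = head(*get_inputs())
    print(logits.shape)  # torch.Size([4, 4]), already multiplied by scale=32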
|
ShakeResNeXt | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# h => convolution
# h_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
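# Reading the fused kernel above: xnumel = 1048576 = 4 * 64 * 64 * 64, one lane per
# element of the (N=4, C=64, H=64, W=64) convolution output. x1 = (xindex // 4096) % 64
# recovers the channel index (4096 = 64 * 64 spatial positions), so tmp0 + tmp1 adds the
# per-channel bias and triton_helpers.maximum(0, .) applies the ReLU, folding what would
# otherwise be two separate kernels into one in-place pass.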
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 1024), (1024, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.avg_pool2d]
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0, 0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (16, 1024), (1024, 1), 0), reinterpret_tensor(primals_4, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf4)
del primals_5
return (buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (16, 1024), (1024, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=
False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.
Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=
cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace
=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNeXt(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXt, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.n_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch,
cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 1, 'w_base': 4, 'cardinary': 4, 'label': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 1024), (1024, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0,
0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (16, 1024),
(1024, 1), 0), reinterpret_tensor(primals_4, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf4)
del primals_5
return buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (16,
1024), (1024, 1), 0), primals_4
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super(ShakeBottleNeck, self).__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=
False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.
Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=
cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace
=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNeXtNew(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super(ShakeResNeXtNew, self).__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4
for i in range(n_units):
layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch,
cardinary, stride=stride))
self.in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.c_in.weight
primals_2 = self.c_in.bias
primals_4 = self.fc_out.weight
primals_5 = self.fc_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| cdtalley/AutoML | ShakeResNeXt | false | 6,406 | [
"MIT"
] | 1 | 918cda6bb1bd55b4ca974bdcdd59e32b2e28399d | https://github.com/cdtalley/AutoML/tree/918cda6bb1bd55b4ca974bdcdd59e32b2e28399d | import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super().__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBottleNeck(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
super().__init__()
self.equal_io = in_ch == out_ch
self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
stride=stride)
self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
self.branch2 = self._make_branch(in_ch, mid_ch, out_ch, cardinary,
stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):
return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=
False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.
Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=
cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace
=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False),
nn.BatchNorm2d(out_ch))
class Model(nn.Module):
def __init__(self, depth, w_base, cardinary, label):
super().__init__()
n_units = (depth - 2) // 9
n_chs = [64, 128, 256, 1024]
self.n_chs = n_chs
self.in_ch = n_chs[0]
self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)
self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)
self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)
self.fc_out = nn.Linear(n_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.n_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):
layers = []
mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch
# ... truncated (>4000 chars) for memory efficiency |
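One caveat on the ShakeShake autograd Function repeated throughout this record: torch.FloatTensor(x1.size(0)).uniform_() allocates alpha on the CPU, so the forward raises a device mismatch once the branch activations live on CUDA (the bundled get_inputs tensors are CPU tensors, which is why the record still runs as-is). A device-safe variant — my own sketch, not part of the dataset row — draws the gate on the input's device and lets broadcasting replace expand_as:

import torch

class ShakeShakeSafe(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x1, x2, training=True):
        if training:
            # (N, 1, 1, 1) gate on the same device/dtype as the activations
            alpha = torch.rand(x1.size(0), 1, 1, 1, device=x1.device, dtype=x1.dtype)
        else:
            alpha = 0.5
        return alpha * x1 + (1 - alpha) * x2

    @staticmethod
    def backward(ctx, grad_output):
        beta = torch.rand(grad_output.size(0), 1, 1, 1,
                          device=grad_output.device, dtype=grad_output.dtype)
        return beta * grad_output, (1 - beta) * grad_output, None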
p_model | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/63/c63e7p6srovrzezxgszbsrlwtqy7uvmjg5eeopmshnuja2aljc2l.py
# Topologically Sorted Source Nodes: [pooled], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# pooled => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%unsqueeze, [1, 4], [1, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
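# What the kernel above computes: xnumel = 64 = N(4) * H*W(16); x0 is the spatial
# position and x1 the batch index. The four loads at offsets 0, 16, 32, 48 walk the
# C=4 channel planes of one pixel, and sum * 0.25 is their mean — i.e. the eager
# module's permute + avg_pool1d over channels reduces to x.mean(dim=1) once the
# view bookkeeping is folded away.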
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [pooled], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class p_model(nn.Module):
"""
input: N * C * W * H
output: N * 1 * W * H
"""
def __init__(self):
super(p_model, self).__init__()
def forward(self, x):
n, c, w, h = x.size()
x = x.view(n, c, w * h).permute(0, 2, 1)
pooled = F.avg_pool1d(x, c)
return pooled.view(n, 1, w, h)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32
)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0),
class p_modelNew(nn.Module):
"""
input: N * C * W * H
output: N * 1 * W * H
"""
def __init__(self):
super(p_modelNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| cenkcorapci/visual-fashion-item-search | p_model | false | 6,407 | [
"MIT"
] | 1 | 47b93f97383c1b7f9ec23bb4ff66f90504db3da8 | https://github.com/cenkcorapci/visual-fashion-item-search/tree/47b93f97383c1b7f9ec23bb4ff66f90504db3da8 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""
input: N * C * W * H
output: N * 1 * W * H
"""
def __init__(self):
super().__init__()
def forward(self, x):
n, c, w, h = x.size()
x = x.view(n, c, w * h).permute(0, 2, 1)
pooled = F.avg_pool1d(x, c)
return pooled.view(n, 1, w, h)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
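Since avg_pool1d with kernel size C over the permuted (N, W*H, C) tensor is exactly a channel mean, a quick parity check for the record above — a sketch assuming the eager Model class from the pytorch_code field is in scope:

import torch

x = torch.rand(4, 4, 4, 4)
out = Model()(x)                        # (N, 1, W, H) via permute + avg_pool1d
ref = x.mean(dim=1, keepdim=True)       # the same reduction, written directly
assert torch.allclose(out, ref, atol=1e-6)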
ShakeResNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lp/clp5td7lbqtje3pt7v6xbcp766swgazqemomz2nzsxtdtmjesxht.py
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# h => convolution
# h_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.avg_pool2d]
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0, 0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (256, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4)
del primals_5
return (buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (256, 16), (16, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        # None when input/output widths match; the original `and ... or` chain
        # always constructed a Shortcut even in the identity case.
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
class ShakeResNet(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNet, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.in_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'depth': 1, 'w_base': 4, 'label': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten.avg_pool2d.default(buf1, [8, 8], [8, 8], [0,
0], False, True, None)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((256, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (256, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf4)
del primals_5
return buf4, primals_1, primals_3, buf1, reinterpret_tensor(buf3, (256,
16), (16, 1), 0), primals_4
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super(Shortcut, self).__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        # None when input/output widths match; the original `and ... or` chain
        # always constructed a Shortcut even in the identity case.
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
class ShakeResNetNew(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNetNew, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.c_in.weight
primals_2 = self.c_in.bias
primals_4 = self.fc_out.weight
primals_5 = self.fc_out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| cdtalley/AutoML | ShakeResNet | false | 6,408 | [
"MIT"
] | 1 | 918cda6bb1bd55b4ca974bdcdd59e32b2e28399d | https://github.com/cdtalley/AutoML/tree/918cda6bb1bd55b4ca974bdcdd59e32b2e28399d | import math
import torch
from torch import nn
from numpy import int64 as int64
import torch.nn.functional as F
from torch.autograd import Variable
class ShakeShake(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, training=True):
if training:
alpha = torch.FloatTensor(x1.size(0)).uniform_()
alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
else:
alpha = 0.5
return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
beta = torch.FloatTensor(grad_output.size(0)).uniform_()
beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
beta = Variable(beta)
return beta * grad_output, (1 - beta) * grad_output, None
class Shortcut(nn.Module):
def __init__(self, in_ch, out_ch, stride):
super().__init__()
self.stride = stride
self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0,
bias=False)
self.bn = nn.BatchNorm2d(out_ch)
def forward(self, x):
h = F.relu(x)
h1 = F.avg_pool2d(h, 1, self.stride)
h1 = self.conv1(h1)
h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
h2 = self.conv2(h2)
h = torch.cat((h1, h2), 1)
return self.bn(h)
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super().__init__()
self.equal_io = in_ch == out_ch
        # None when input/output widths match; the original `and ... or` chain
        # always constructed a Shortcut even in the identity case.
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch,
            stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch,
out_ch, 3, padding=1, stride=stride, bias=False), nn.
BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch,
out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch)
)
class Model(nn.Module):
def __init__(self, depth, w_base, label):
super().__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.in_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [1, 4, 4]
|
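For the ShakeResNet record above, n_units = (depth - 2) / 6, so the canonical shake-shake configuration depth=26 yields four blocks per stage, while the test config depth=1 gives int(n_units) == 0 and three empty stages — which is why the compiled graph collapses to conv, relu, avg_pool2d and addmm. A shape check under the canonical config — a sketch assuming the eager Model class from the record is in scope and CIFAR-sized inputs:

import torch

model = Model(depth=26, w_base=32, label=10)   # 4 ShakeBlocks per stage
x = torch.rand(2, 3, 32, 32)                   # 32x32 input matches avg_pool2d(h, 8)
assert model(x).shape == (2, 10)               # stages: 32 -> 32 -> 16 -> 8 -> pool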
LanguageModelCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/y6/cy6c63inntzv3gk7tttvtntl47lyazeloek7emnkrhkhkqbx5kci.py
# Topologically Sorted Source Nodes: [neg, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# neg => neg
# output => mul
# output_1 => div
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg2_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg2_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_div_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr2 + (r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
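# The kernel above fuses the whole criterion: r0 ranges over the 16 = 4x4 (batch, time)
# cells, tmp0 is the target index (tmp2/tmp4 wrap negatives the way a Python index
# would), tmp6 gathers input[b, t, target], and tmp8 * tmp9 applies the mask to the
# negated value. The two tl.sum reductions and tmp13 / tmp16 realize
# sum(-gathered * mask) / sum(mask) in a single pass; the .to(tl.float32) cast exists
# because the benchmark feeds an int64 `input` tensor.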
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0.run(buf2, arg1_1, arg0_1, arg2_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import *
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)]
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = -tmp6
tmp8 = tmp7.to(tl.float32)
tmp10 = tmp8 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp17 = tmp13 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class LanguageModelCriterionNew(nn.Module):
def __init__(self):
super(LanguageModelCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| chagmgang/object_relation_transformer | LanguageModelCriterion | false | 6,409 | [
"MIT"
] | 1 | 04b88514f97232c12b576720e4b82226751c3c48 | https://github.com/chagmgang/object_relation_transformer/tree/04b88514f97232c12b576720e4b82226751c3c48 | import torch
import torch.nn as nn
from torch.autograd import *
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, target, mask):
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)]
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return []
|
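With an all-ones mask, the masked reduction above matches mean-reduced NLL over the flattened (batch, time) cells; a small parity sketch, assuming the eager Model class from the record is in scope:

import torch
import torch.nn.functional as F

logp = torch.rand(4, 4, 4).log_softmax(-1)     # (N, T, V) log-probabilities
tgt = torch.randint(0, 4, (4, 4))
mask = torch.ones(4, 4)
loss = Model()(logp, tgt, mask)
ref = F.nll_loss(logp.reshape(-1, 4), tgt.reshape(-1))   # mean over N*T
assert torch.allclose(loss, ref, atol=1e-6)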
BertSelfOutput | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ss/cssn3ayzwsxbizosd6ieezxafjef3fxscx57lbnlxbdiuph3p2je.py
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
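    # Fuses (matmul output + bias + residual) and reduces each 4-wide row to
    # its mean: in_ptr0 = mm result, in_ptr1 = dense bias, in_ptr2 = residual.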
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l6/cl6vibrzoyykzmbhmvlsdaksh3k2diif7eg66z2ho46tjsy6emma.py
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6d/c6dv445enwddy5hcfn3p4vvjynq4oa2vnla2acdsag6hrxww6urd.py
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# hidden_states_2 => add_2
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
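    # in_ptr1 already holds the centred activations (x - u); tmp14 below is
    # their biased row variance, matching mean((x - u)**2) in the module.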
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
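# Illustrative check (not part of the original source; shapes are assumptions
# chosen for the demo): with biased variance and epsilon inside the square
# root, this module should match torch.nn.functional.layer_norm up to
# floating-point tolerance.
def _bert_layer_norm_example():
    ln = BertLayerNorm(4)
    x = torch.rand(2, 4)
    ref = torch.nn.functional.layer_norm(x, (4,), ln.weight, ln.bias,
        ln.variance_epsilon)
    assert torch.allclose(ln(x), ref, atol=1e-06)
    return ref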
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
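# Illustrative usage sketch (not part of the original source; the dropout
# probability and shapes are assumptions chosen for the demo):
def _bert_self_output_example():
    layer = BertSelfOutput(_mock_config(hidden_size=4, hidden_dropout_prob=0.1))
    hidden = torch.rand(2, 5, 4)    # e.g. the self-attention output
    residual = torch.rand(2, 5, 4)  # the block's original input
    return layer(hidden, residual)  # LayerNorm(dropout(dense(hidden)) + residual)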
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5,
buf2, primals_6, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutputNew(nn.Module):
def __init__(self, config):
super(BertSelfOutputNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.weight
primals_6 = self.LayerNorm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| caldoe/BERT-NL2SPARQL | BertSelfOutput | false | 6,410 | [
"MIT"
] | 1 | 2e09c1aeffc855bc7f1dc8c182e21153b2bc73a8 | https://github.com/caldoe/BERT-NL2SPARQL/tree/2e09c1aeffc855bc7f1dc8c182e21153b2bc73a8 | from _paritybench_helpers import _mock_config
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5)}]
|
Norm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tc/ctcoccnojrifwsjhb4gqgfu5kxpt6dvdpv4qwca7cbgn27ktptbk.py
# Topologically Sorted Source Nodes: [mean, sub, std, add, norm, mul, norm_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mul => mul
# norm => div
# norm_1 => add_1
# std => sqrt, var
# sub => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
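    # Row statistics are recomputed per element: tmp10 is the mean over the
    # 4-wide last dim, tmp24 the unbiased variance (3.0 = n - 1 divisor).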
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub, std, add, norm, mul, norm_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.onnx
class Norm(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
"""
inputs:
        x: input tensor of shape (batch size, sequence length, embedding dimensions)
        outputs: scaled, normalized x
"""
norm = (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=
True) + self.eps)
norm = self.alpha * norm + self.bias
return norm
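# Illustrative check (not part of the original source): note that x.std()
# uses the unbiased estimator (n - 1 divisor) and that eps is added outside
# the square root, so this differs slightly from a standard LayerNorm.
def _norm_example():
    norm = Norm(4)
    x = torch.rand(2, 3, 4)
    manual = norm.alpha * ((x - x.mean(dim=-1, keepdim=True)) /
        (x.std(dim=-1, keepdim=True) + norm.eps)) + norm.bias
    assert torch.allclose(norm(x), manual)
    return manual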
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class NormNew(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| chandar-lab/CriticalGradientOptimization | Norm | false | 6,411 | [
"MIT"
] | 1 | 1af4b1df40489991289bb50bb69859a00b2c97c6 | https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6 | import torch
import torch.nn as nn
import torch.onnx
class Model(nn.Module):
def __init__(self, emb_dim, eps=1e-06):
super().__init__()
self.size = emb_dim
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
"""
inputs:
        x: input tensor of shape (batch size, sequence length, embedding dimensions)
        outputs: scaled, normalized x
"""
norm = (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=
True) + self.eps)
norm = self.alpha * norm + self.bias
return norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
RewardCriterion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jk/cjk4a6hzo7lmkian2zuwtbzbsa4v76s5vf3coi67m65hich66au5.py
# Topologically Sorted Source Nodes: [neg, mul, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# mul => mul
# neg => neg
# output => mul_1
# output_1 => div
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %view_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_div_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = -tmp0
tmp3 = tmp1 * tmp2
tmp4 = r0 % 4
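    # The shifted mask is rebuilt inline: column 0 of each row is 1.0 (tmp11),
    # columns 1..3 load seq[:, col - 1] and test > 0 (tmp17/tmp18).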
tmp5 = tl.full([1, 1], 0, tl.int64)
tmp6 = tmp4 >= tmp5
tmp7 = tl.full([1, 1], 1, tl.int64)
tmp8 = tmp4 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tmp4 >= tmp7
tmp13 = tl.full([1, 1], 4, tl.int64)
tmp14 = tmp4 < tmp13
tmp15 = tl.load(in_ptr2 + (tl.broadcast_to((4*(r0 // 4)) + ((-1) + (r0 % 4)), [XBLOCK, RBLOCK])), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = 0.0
tmp17 = tmp15 > tmp16
tmp18 = tmp17.to(tl.float32)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp8, tmp11, tmp20)
tmp22 = tmp3 * tmp21
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp25 = tl.sum(tmp23, 1)[:, None]
tmp26 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp28 = tl.sum(tmp26, 1)[:, None]
tmp29 = tmp25 / tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, mul, output, sum_1, sum_2, output_1], Original ATen: [aten.neg, aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import *
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
"""
        This function computes the loss
            -sum(log(y_t) * reward * mask_t) / sum(mask_t)
        (where mask_t zeroes out non-words in the sequence), given
            input = predicted log-probability of each word
            seq = predicted word indices
            reward = ...
"""
input = to_contiguous(input).view(-1)
reward = to_contiguous(reward).view(-1)
mask = (seq > 0).float()
mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1),
mask[:, :-1]], 1)).view(-1)
output = -input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
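# Illustrative worked example (not part of the original source; the numbers
# are assumptions chosen so the result is easy to verify by hand):
def _reward_criterion_example():
    crit = RewardCriterion()
    seq = torch.tensor([[3, 5, 0, 0]])  # sampled word indices, 0 = pad
    logp = torch.full((1, 4), -1.0)     # gathered log-probs of those words
    reward = torch.full((1, 4), 2.0)
    # mask = [1, 1, 1, 0]: (seq > 0) shifted right with a leading 1, so the
    # first position after the last real word (typically EOS) still counts.
    return crit(logp, seq, reward)  # (2 + 2 + 2) / 3 = 2.0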
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = -tmp0
tmp3 = tmp1 * tmp2
tmp4 = r0 % 4
tl.full([1, 1], 0, tl.int64)
tmp7 = tl.full([1, 1], 1, tl.int64)
tmp8 = tmp4 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = tmp4 >= tmp7
tl.full([1, 1], 4, tl.int64)
tmp15 = tl.load(in_ptr2 + tl.broadcast_to(4 * (r0 // 4) + (-1 + r0 % 4),
[XBLOCK, RBLOCK]), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = 0.0
tmp17 = tmp15 > tmp16
tmp18 = tmp17.to(tl.float32)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp8, tmp11, tmp20)
tmp22 = tmp3 * tmp21
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tmp25 = tl.sum(tmp23, 1)[:, None]
tmp26 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp28 = tl.sum(tmp26, 1)[:, None]
tmp29 = tmp25 / tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg0_1, arg1_1,
arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class RewardCriterionNew(nn.Module):
def __init__(self):
super(RewardCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| chagmgang/object_relation_transformer | RewardCriterion | false | 6,412 | [
"MIT"
] | 1 | 04b88514f97232c12b576720e4b82226751c3c48 | https://github.com/chagmgang/object_relation_transformer/tree/04b88514f97232c12b576720e4b82226751c3c48 | import torch
import torch.nn as nn
from torch.autograd import *
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, seq, reward):
"""
This function computes
log(y_t) * reward * mask_t (where mask_t zeroes out non-words in the sequence)
given
input = predicted probability
sequence = predicted word index
reward = ...
"""
input = to_contiguous(input).view(-1)
reward = to_contiguous(reward).view(-1)
mask = (seq > 0).float()
mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1),
mask[:, :-1]], 1)).view(-1)
output = -input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/il/cile5rubx7j2trpwwhxvyx2n7vmplsdhfcaobcwnijtfsgj3p43b.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# dice => div
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# sub => sub
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
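    # One block reduces all 256 elements: tmp6 = sum(sigmoid(x) * y),
    # tmp9 = sum(sigmoid(x)), tmp12 = sum(y); the tail then forms
    # 1 - (2*tmp6 + 1) / (tmp9 + tmp12 + 1), i.e. the smooth Dice loss.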
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.broadcast_to(tmp1, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.broadcast_to(tmp2, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 2.0
tmp14 = tmp6 * tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp9 + tmp12
tmp18 = tmp17 + tmp15
tmp19 = tmp16 / tmp18
tmp20 = tmp15 - tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceLoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
inputs = torch.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum(
) + smooth)
return 1 - dice
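# Illustrative worked example (not part of the original source): with large
# logits the sigmoid saturates to 0/1 and the soft Dice reduces to the usual
# overlap formula.
def _dice_loss_example():
    crit = DiceLoss()
    logits = torch.tensor([100.0, 100.0, -100.0, -100.0])  # sigmoid -> 1,1,0,0
    target = torch.tensor([1.0, 0.0, 0.0, 0.0])
    # intersection = 1, dice = (2*1 + 1) / (2 + 1 + 1) = 0.75, loss = 0.25
    return crit(logits, target)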
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.broadcast_to(tmp1, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.broadcast_to(tmp2, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 2.0
tmp14 = tmp6 * tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp9 + tmp12
tmp18 = tmp17 + tmp15
tmp19 = tmp16 / tmp18
tmp20 = tmp15 - tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| chakerouari/UNET_segmetation | DiceLoss | false | 6,413 | [
"MIT"
] | 1 | a7d9e9ccd31595d482f620cbf9a625a486f5f0df | https://github.com/chakerouari/UNET_segmetation/tree/a7d9e9ccd31595d482f620cbf9a625a486f5f0df | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, weight=None, size_average=True):
super().__init__()
def forward(self, inputs, targets, smooth=1):
inputs = torch.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum(
) + smooth)
return 1 - dice
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LocalSubGraphLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3e/c3eujxenartdmlyuzc222hnqzsyd2rlkdf4g2o25jhth6elnxv7k.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.native_layer_norm, aten.relu]
# Source node to ATen node mapping:
# x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# x_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_poi_fused_native_layer_norm_relu_1 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
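# Finish LayerNorm with the precomputed row stats, apply the affine
# weight/bias, then ReLU. The stride-8 store targets the left half of the
# fused concat buffer (buf7 in call() below).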
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x0 + (8*x1)), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xv/cxvulv4lzlvhzhwpz6ckcdkrhf5fzrnvc4u2bwjmxivnx6m6ui34.py
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
# Source node to ATen node mapping:
# gt => gt
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%unsqueeze, 0), kwargs = {})
triton_poi_fused_gt_2 = async_compile.triton('triton_poi_fused_gt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
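# Build the boolean invalid mask: True wherever the input mask is > 0.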
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tt/cttzjcuuyhw4l25vtuen4ir6qwainhdouuzpzt35pk47mapflw5d.py
# Topologically Sorted Source Nodes: [masked_x, max_1], Original ATen: [aten.masked_fill, aten.max]
# Source node to ATen node mapping:
# masked_x => full_default, where
# max_1 => max_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %full_default, %relu), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%where, 1, True), kwargs = {})
triton_poi_fused_masked_fill_max_3 = async_compile.triton('triton_poi_fused_masked_fill_max_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_masked_fill_max_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_masked_fill_max_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
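# Masked max over the 4 vectors of each batch element: entries whose mask
# bit is set are replaced by -inf before the running maximum is taken.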
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x0 + (32*x1)), xmask)
tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp5 = tl.load(in_ptr1 + (8 + x0 + (32*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (16 + x0 + (32*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr1 + (24 + x0 + (32*x1)), xmask)
tmp2 = float("-inf")
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ty/ctyfm6vre472lzqn3224pxin7zqwjubxcqsgjjgh2orlphfjfvxn.py
# Topologically Sorted Source Nodes: [masked_x, max_1, x_agg_1], Original ATen: [aten.masked_fill, aten.max, aten.repeat]
# Source node to ATen node mapping:
# masked_x => full_default, where
# max_1 => max_1
# x_agg_1 => repeat
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %full_default, %relu), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%where, 1, True), kwargs = {})
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%getitem_2, [1, 4, 1]), kwargs = {})
triton_poi_fused_masked_fill_max_repeat_4 = async_compile.triton('triton_poi_fused_masked_fill_max_repeat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_masked_fill_max_repeat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_masked_fill_max_repeat_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = (xindex // 4)
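# Broadcast each batch's max row across all 4 vectors; out_ptr0 aliases the
# right half (element offset 4) of the 8-wide concat rows, so the repeat
# lands directly next to the per-vector features.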
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0 + (8*x3)), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(buf0, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf3 = reinterpret_tensor(buf7, (4, 4, 4), (32, 8, 1), 0) # alias
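# buf3 and buf6 (below) alias the left and right halves of buf7's 8-wide
# rows, so torch.cat([x, x_agg], dim=-1) from the original forward never
# materializes a separate copy.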
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.native_layer_norm, aten.relu]
triton_poi_fused_native_layer_norm_relu_1.run(buf0, buf1, buf2, primals_3, primals_4, buf3, 64, grid=grid(64), stream=stream0)
del buf1
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
triton_poi_fused_gt_2.run(primals_5, buf4, 16, grid=grid(16), stream=stream0)
del primals_5
buf5 = reinterpret_tensor(buf2, (4, 1, 4), (4, 16, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [masked_x, max_1], Original ATen: [aten.masked_fill, aten.max]
triton_poi_fused_masked_fill_max_3.run(buf4, buf3, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf7, (4, 4, 4), (32, 8, 1), 4) # alias
# Topologically Sorted Source Nodes: [masked_x, max_1, x_agg_1], Original ATen: [aten.masked_fill, aten.max, aten.repeat]
triton_poi_fused_masked_fill_max_repeat_4.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (16, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf8)
del primals_7
return (reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0), primals_3, primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf0, buf4, reinterpret_tensor(buf7, (16, 8), (8, 1), 0), primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class LocalMLP(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""forward of the module
:param x: input tensor (..., dim_in)
:type x: torch.Tensor
:return: output tensor (..., dim_in)
:rtype: torch.Tensor
"""
x = self.linear(x)
if hasattr(self, 'norm'):
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
class LocalSubGraphLayer(nn.Module):
def __init__(self, dim_in: 'int', dim_out: 'int') ->None:
"""Local subgraph layer
:param dim_in: input feat size
:type dim_in: int
:param dim_out: output feat size
:type dim_out: int
"""
super(LocalSubGraphLayer, self).__init__()
self.mlp = LocalMLP(dim_in)
self.linear_remap = nn.Linear(dim_in * 2, dim_out)
def forward(self, x: 'torch.Tensor', invalid_mask: 'torch.Tensor'
) ->torch.Tensor:
"""Forward of the model
:param x: input tensor (B,N,P,dim_in)
:type x: torch.Tensor
:param invalid_mask: invalid mask for x (B,N,P)
:type invalid_mask: torch.Tensor
:return: output tensor (B,N,P,dim_out)
:rtype: torch.Tensor
"""
_, num_vectors, _ = x.shape
x = self.mlp(x)
masked_x = x.masked_fill(invalid_mask[..., None] > 0, float('-inf'))
x_agg = masked_x.max(dim=1, keepdim=True).values
x_agg = x_agg.repeat(1, num_vectors, 1)
x = torch.cat([x, x_agg], dim=-1)
x = self.linear_remap(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp10, xmask)
@triton.jit
def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_masked_fill_max_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x0 + 32 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last').to(tl.int1)
tmp5 = tl.load(in_ptr1 + (8 + x0 + 32 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (16 + x0 + 32 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr1 + (24 + x0 + 32 * x1), xmask)
tmp2 = float('-inf')
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_masked_fill_max_repeat_4(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0 + 8 * x3), tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](buf0, buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
buf3 = reinterpret_tensor(buf7, (4, 4, 4), (32, 8, 1), 0)
triton_poi_fused_native_layer_norm_relu_1[grid(64)](buf0, buf1,
buf2, primals_3, primals_4, buf3, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf1
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
triton_poi_fused_gt_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = reinterpret_tensor(buf2, (4, 1, 4), (4, 16, 1), 0)
del buf2
triton_poi_fused_masked_fill_max_3[grid(16)](buf4, buf3, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf7, (4, 4, 4), (32, 8, 1), 4)
triton_poi_fused_masked_fill_max_repeat_4[grid(64)](buf5, buf6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf5
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf8)
del primals_7
return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0
), primals_3, primals_4, reinterpret_tensor(primals_1, (16, 4), (4,
1), 0), buf0, buf4, reinterpret_tensor(buf7, (16, 8), (8, 1), 0
), primals_6
class LocalMLP(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""forward of the module
:param x: input tensor (..., dim_in)
:type x: torch.Tensor
:return: output tensor (..., dim_in)
:rtype: torch.Tensor
"""
x = self.linear(x)
if hasattr(self, 'norm'):
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
class LocalSubGraphLayerNew(nn.Module):
def __init__(self, dim_in: 'int', dim_out: 'int') ->None:
"""Local subgraph layer
:param dim_in: input feat size
:type dim_in: int
:param dim_out: output feat size
:type dim_out: int
"""
super(LocalSubGraphLayerNew, self).__init__()
self.mlp = LocalMLP(dim_in)
self.linear_remap = nn.Linear(dim_in * 2, dim_out)
def forward(self, input_0, input_1):
primals_2 = self.mlp.linear.weight
primals_3 = self.mlp.norm.weight
primals_4 = self.mlp.norm.bias
primals_6 = self.linear_remap.weight
primals_7 = self.linear_remap.bias
primals_1 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| cdicle-motional/l5kit | LocalSubGraphLayer | false | 6,414 | [
"Apache-2.0"
] | 1 | 4dc4ee5391479bb71f0b373f39c316f9eef5a961 | https://github.com/cdicle-motional/l5kit/tree/4dc4ee5391479bb71f0b373f39c316f9eef5a961 | import torch
from torch import nn
import torch.nn.functional as F
class LocalMLP(nn.Module):
def __init__(self, dim_in: 'int', use_norm: 'bool'=True):
"""a Local 1 layer MLP
:param dim_in: feat in size
:type dim_in: int
:param use_norm: if to apply layer norm, defaults to True
:type use_norm: bool, optional
"""
super().__init__()
self.linear = nn.Linear(dim_in, dim_in, bias=not use_norm)
self.use_norm = use_norm
if use_norm:
self.norm = nn.LayerNorm(dim_in)
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""forward of the module
:param x: input tensor (..., dim_in)
:type x: torch.Tensor
:return: output tensor (..., dim_in)
:rtype: torch.Tensor
"""
x = self.linear(x)
if hasattr(self, 'norm'):
x = self.norm(x)
x = F.relu(x, inplace=True)
return x
class Model(nn.Module):
def __init__(self, dim_in: 'int', dim_out: 'int') ->None:
"""Local subgraph layer
:param dim_in: input feat size
:type dim_in: int
:param dim_out: output feat size
:type dim_out: int
"""
super().__init__()
self.mlp = LocalMLP(dim_in)
self.linear_remap = nn.Linear(dim_in * 2, dim_out)
def forward(self, x: 'torch.Tensor', invalid_mask: 'torch.Tensor'
) ->torch.Tensor:
"""Forward of the model
:param x: input tensor
:tensor (B,N,P,dim_in)
:param invalid_mask: invalid mask for x
:tensor invalid_mask (B,N,P)
:return: output tensor (B,N,P,dim_out)
:rtype: torch.Tensor
"""
_, num_vectors, _ = x.shape
x = self.mlp(x)
masked_x = x.masked_fill(invalid_mask[..., None] > 0, float('-inf'))
x_agg = masked_x.max(dim=1, keepdim=True).values
x_agg = x_agg.repeat(1, num_vectors, 1)
x = torch.cat([x, x_agg], dim=-1)
x = self.linear_remap(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
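# --- Illustrative usage sketch (editor's addition, not from the source repo) ---
# Shapes follow get_inputs()/get_init_inputs() above; runs on plain CPU tensors.
if __name__ == '__main__':
    model = Model(*get_init_inputs())
    x, invalid_mask = get_inputs()
    out = model(x, invalid_mask)
    assert out.shape == (4, 4, 4)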
|
PinballLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/n3/cn3ibaenux4ljrzrug2suadaqyb4uuj4p27xmsnxrco7of4fcu76.py
# Topologically Sorted Source Nodes: [delta_y, mul, mul_1, pinball, pinball_1], Original ATen: [aten.sub, aten.mul, aten.maximum, aten.mean]
# Source node to ATen node mapping:
# delta_y => sub
# mul => mul
# mul_1 => mul_1
# pinball => maximum
# pinball_1 => mean
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (0.5, %sub), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (-0.5, %sub), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%maximum,), kwargs = {})
triton_per_fused_maximum_mean_mul_sub_0 = async_compile.triton('triton_per_fused_maximum_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_maximum_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_maximum_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
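# A single program reduces all 256 elements: form the residual d between the
# two inputs, take max(0.5*d, -0.5*d) = 0.5*|d| (pinball with tau = 0.5 baked
# in as constants), then average.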
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [delta_y, mul, mul_1, pinball, pinball_1], Original ATen: [aten.sub, aten.mul, aten.maximum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_maximum_mean_mul_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PinballLoss(nn.Module):
"""Computes the pinball loss between y and y_hat.
y: actual values in torch tensor.
y_hat: predicted values in torch tensor.
tau: a float between 0 and 1, the slope of the pinball loss. In the context
of quantile regression, the value of tau determines the conditional
quantile level.
return: pinball_loss
"""
def __init__(self, tau=0.5):
super(PinballLoss, self).__init__()
self.tau = tau
def forward(self, y, y_hat):
delta_y = torch.sub(y, y_hat)
pinball = torch.max(torch.mul(self.tau, delta_y), torch.mul(self.
tau - 1, delta_y))
pinball = pinball.mean()
return pinball
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_maximum_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp3 * tmp2
tmp5 = -0.5
tmp6 = tmp5 * tmp2
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_maximum_mean_mul_sub_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class PinballLossNew(nn.Module):
"""Computes the pinball loss between y and y_hat.
y: actual values in torch tensor.
y_hat: predicted values in torch tensor.
tau: a float between 0 and 1, the slope of the pinball loss. In the context
of quantile regression, the value of tau determines the conditional
quantile level.
return: pinball_loss
"""
def __init__(self, tau=0.5):
super(PinballLossNew, self).__init__()
self.tau = tau
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| cchallu/esrnn | PinballLoss | false | 6,415 | [
"MIT"
] | 1 | 543ca365c70be2775a4b5863820b246071ccde3c | https://github.com/cchallu/esrnn/tree/543ca365c70be2775a4b5863820b246071ccde3c | import torch
import torch.nn as nn
class Model(nn.Module):
"""Computes the pinball loss between y and y_hat.
y: actual values in torch tensor.
y_hat: predicted values in torch tensor.
tau: a float between 0 and 1, the slope of the pinball loss. In the context
of quantile regression, the value of tau determines the conditional
quantile level.
return: pinball_loss
"""
def __init__(self, tau=0.5):
super().__init__()
self.tau = tau
def forward(self, y, y_hat):
delta_y = torch.sub(y, y_hat)
pinball = torch.max(torch.mul(self.tau, delta_y), torch.mul(self.
tau - 1, delta_y))
pinball = pinball.mean()
return pinball
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
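# --- Illustrative check (editor's addition, not from the source repo) ---
# With the default tau = 0.5, max(0.5*d, -0.5*d) = 0.5*|d|, so the pinball
# loss reduces to half the mean absolute error.
if __name__ == '__main__':
    y, y_hat = get_inputs()
    loss = Model()(y, y_hat)
    assert torch.allclose(loss, 0.5 * (y - y_hat).abs().mean())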
|
TripletMarginLossCosine | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qn/cqnpu7p4ighjxwzhyvwxizwrkq2rtaagvk23ots7oabybofnppez.py
# Topologically Sorted Source Nodes: [cosine_similarity, cosine_similarity_1], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul]
# Source node to ATen node mapping:
# cosine_similarity => clamp_min, clamp_min_1, div, div_1, mul, pow_1, pow_2, pow_3, pow_4, sum_1, sum_2
# cosine_similarity_1 => clamp_min_2, clamp_min_3, div_2, div_3, mul_1, pow_5, pow_6, pow_7, pow_8, sum_4, sum_5
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-08), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %clamp_min), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-08), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %clamp_min_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_6, 1e-08), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %clamp_min_2), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg2_1, 2), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [1], True), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_8, 1e-08), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg2_1, %clamp_min_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, %div_2), kwargs = {})
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 15, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
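# Normalize each of the three inputs along dim=1 (four 16-element channel
# slices per 64-element batch block, with norms clamped at 1e-08) and emit
# the elementwise products for the two cosine similarities; the channel sum
# is deferred to the reduction kernel below.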
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x3), xmask)
tmp17 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (x3), xmask)
tmp33 = tl.load(in_ptr2 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr2 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tmp34 = tmp33 * tmp33
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp42 = tmp41 * tmp41
tmp43 = tmp40 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tmp45 = triton_helpers.maximum(tmp44, tmp13)
tmp46 = tmp32 / tmp45
tmp47 = tmp15 * tmp46
tl.store(out_ptr0 + (x3), tmp31, xmask)
tl.store(out_ptr1 + (x3), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vi/cvigvemwoq43loq55hkwib7y2z2slcb7j6tbivm2ymaifahn6npc.py
# Topologically Sorted Source Nodes: [d_p, add, d_n, sub_2, dist_hinge, loss], Original ATen: [aten.rsub, aten.add, aten.sub, aten.clamp, aten.mean]
# Source node to ATen node mapping:
# add => add
# d_n => sub_1
# d_p => sub
# dist_hinge => clamp_min_4
# loss => mean
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %sub_1), kwargs = {})
# %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min_4,), kwargs = {})
triton_per_fused_add_clamp_mean_rsub_sub_1 = async_compile.triton('triton_per_fused_add_clamp_mean_rsub_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_rsub_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_mean_rsub_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
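# Sum the four channel products to finish each cosine similarity, form
# d_p + margin - d_n with margin = 1.0, clamp at zero, and average the
# 64 resulting hinge terms.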
tmp0 = tl.load(in_ptr0 + ((64*(r0 // 16)) + (r0 % 16)), None)
tmp1 = tl.load(in_ptr0 + (16 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp3 = tl.load(in_ptr0 + (32 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp5 = tl.load(in_ptr0 + (48 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp10 = tl.load(in_ptr1 + ((64*(r0 // 16)) + (r0 % 16)), None)
tmp11 = tl.load(in_ptr1 + (16 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp13 = tl.load(in_ptr1 + (32 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp15 = tl.load(in_ptr1 + (48 + (64*(r0 // 16)) + (r0 % 16)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tmp8 + tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp7 - tmp16
tmp18 = tmp9 - tmp17
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp24 = 64.0
tmp25 = tmp23 / tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cosine_similarity, cosine_similarity_1], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0.run(arg1_1, arg0_1, arg2_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [d_p, add, d_n, sub_2, dist_hinge, loss], Original ATen: [aten.rsub, aten.add, aten.sub, aten.clamp, aten.mean]
triton_per_fused_add_clamp_mean_rsub_sub_1.run(buf3, buf0, buf1, 1, 64, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class TripletMarginLossCosine(nn.Module):
def __init__(self, margin=1.0):
super(TripletMarginLossCosine, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative):
d_p = 1 - F.cosine_similarity(anchor, positive).view(-1, 1)
d_n = 1 - F.cosine_similarity(anchor, negative).view(-1, 1)
dist_hinge = torch.clamp(self.margin + d_p - d_n, min=0.0)
loss = torch.mean(dist_hinge)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x3, xmask)
tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr2 + x3, xmask)
tmp33 = tl.load(in_ptr2 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr2 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp38 = tl.load(in_ptr2 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp41 = tl.load(in_ptr2 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp17 * tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = triton_helpers.maximum(tmp28, tmp13)
tmp30 = tmp16 / tmp29
tmp31 = tmp15 * tmp30
tmp34 = tmp33 * tmp33
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp42 = tmp41 * tmp41
tmp43 = tmp40 + tmp42
tmp44 = libdevice.sqrt(tmp43)
tmp45 = triton_helpers.maximum(tmp44, tmp13)
tmp46 = tmp32 / tmp45
tmp47 = tmp15 * tmp46
tl.store(out_ptr0 + x3, tmp31, xmask)
tl.store(out_ptr1 + x3, tmp47, xmask)
@triton.jit
def triton_per_fused_add_clamp_mean_rsub_sub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 * (r0 // 16) + r0 % 16), None)
tmp1 = tl.load(in_ptr0 + (16 + 64 * (r0 // 16) + r0 % 16), None)
tmp3 = tl.load(in_ptr0 + (32 + 64 * (r0 // 16) + r0 % 16), None)
tmp5 = tl.load(in_ptr0 + (48 + 64 * (r0 // 16) + r0 % 16), None)
tmp10 = tl.load(in_ptr1 + (64 * (r0 // 16) + r0 % 16), None)
tmp11 = tl.load(in_ptr1 + (16 + 64 * (r0 // 16) + r0 % 16), None)
tmp13 = tl.load(in_ptr1 + (32 + 64 * (r0 // 16) + r0 % 16), None)
tmp15 = tl.load(in_ptr1 + (48 + 64 * (r0 // 16) + r0 % 16), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp9 = tmp8 + tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp7 - tmp16
tmp18 = tmp9 - tmp17
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp24 = 64.0
tmp25 = tmp23 / tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](
arg1_1, arg0_1, arg2_1, buf0, buf1, 256, XBLOCK=128, num_warps=
4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_add_clamp_mean_rsub_sub_1[grid(1)](buf3, buf0,
buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class TripletMarginLossCosineNew(nn.Module):
def __init__(self, margin=1.0):
super(TripletMarginLossCosineNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
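# Minimal usage sketch (assumes a CUDA device; the 4x4x4x4 shapes mirror the
# original module's get_inputs(), and note that the compiled graph bakes the
# margin in as 1.0, so the constructor argument is not re-read):
#   loss_fn = TripletMarginLossCosineNew()
#   a, p, n = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
#   loss = loss_fn(a, p, n)  # scalar: mean(clamp(1 + d_p - d_n, min=0))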
| cenkcorapci/visual-fashion-item-search | TripletMarginLossCosine | false | 6,416 | ["MIT"] | 1 | 47b93f97383c1b7f9ec23bb4ff66f90504db3da8 | https://github.com/cenkcorapci/visual-fashion-item-search/tree/47b93f97383c1b7f9ec23bb4ff66f90504db3da8 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, margin=1.0):
super().__init__()
self.margin = margin
def forward(self, anchor, positive, negative):
d_p = 1 - F.cosine_similarity(anchor, positive).view(-1, 1)
d_n = 1 - F.cosine_similarity(anchor, negative).view(-1, 1)
dist_hinge = torch.clamp(self.margin + d_p - d_n, min=0.0)
loss = torch.mean(dist_hinge)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
ImgPatches | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vo/cvo4iu6rn2jljwd7fn6tqzwns5mcxrufyjzpv2b3qybl4vhvc3h2.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2304
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (48*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4m/c4mshxu4bq3hduvpdghbp5cvwf5wm5cau2c3c2hmzusgbrcciu2t.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 3072
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = (yindex // 768)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (768*x2) + (196608*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (256*y3)), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (768, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 3, 4, 4), (48, 1, 12, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 2304, 16, grid=grid(2304, 16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, buf0, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf3 = empty_strided_cuda((4, 768, 16, 16), (196608, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf2, primals_2, buf3, 3072, 256, grid=grid(3072, 256), stream=stream0)
del buf2
del primals_2
return (reinterpret_tensor(buf3, (4, 256, 768), (196608, 1, 256), 0), buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((768, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((768, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ImgPatches(nn.Module):
def __init__(self, input_channel=3, dim=768, patch_size=4):
super().__init__()
self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, img):
patches = self.patch_embed(img).flatten(2).transpose(1, 2)
return patches
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
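# Layout kernel: repacks the (768, 3, 4, 4) patch-embedding weight from
# contiguous NCHW strides into the channels-last buffer the external
# convolution expects (y0 = input channel, x2 = kernel position, y1 = filter).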
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 2304
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask)
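# Layout kernel: repacks the (4, 3, 64, 64) input image into channels-last
# strides (12288, 1, 192, 3) so the convolution runs in NHWC.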
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
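# Epilogue kernel: adds the conv bias (in_ptr1) to the NHWC convolution
# output and transposes it back to a contiguous NCHW buffer, which call()
# then reinterprets as the (4, 256, 768) patch sequence.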
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 768
y1 = yindex // 768
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 768 * x2 + 196608 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (768,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((768, 3, 4, 4), (48, 1, 12, 3), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(2304, 16)](primals_1, buf0, 2304, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(4, 4),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 768, 16, 16), (196608, 1, 12288, 768))
buf3 = empty_strided_cuda((4, 768, 16, 16), (196608, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_2[grid(3072, 256)](buf2, primals_2,
buf3, 3072, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf2
del primals_2
return reinterpret_tensor(buf3, (4, 256, 768), (196608, 1, 256), 0
), buf0, buf1
class ImgPatchesNew(nn.Module):
def __init__(self, input_channel=3, dim=768, patch_size=4):
super().__init__()
self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, input_0):
primals_1 = self.patch_embed.weight
primals_2 = self.patch_embed.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
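# Minimal usage sketch (assumes a CUDA device; the graph is specialized to
# the (4, 3, 64, 64) input shape from the original get_inputs()):
#   patcher = ImgPatchesNew(input_channel=3, dim=768, patch_size=4).cuda()
#   tokens = patcher(torch.rand(4, 3, 64, 64, device='cuda'))
#   assert tokens.shape == (4, 256, 768)  # (batch, 16*16 patches, dim)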
| ch0n9waiu/TransCycleGAN | ImgPatches | false | 6,417 | ["MIT"] | 1 | a3e846e21101400282a9f1393c1f8d150a3d92c9 | https://github.com/ch0n9waiu/TransCycleGAN/tree/a3e846e21101400282a9f1393c1f8d150a3d92c9 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_channel=3, dim=768, patch_size=4):
super().__init__()
self.patch_embed = nn.Conv2d(input_channel, dim, kernel_size=
patch_size, stride=patch_size)
def forward(self, img):
patches = self.patch_embed(img).flatten(2).transpose(1, 2)
return patches
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
MultiHeadAttn | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gu/cguak4pmui2ydemh2nfin5rax4se6fwibhum52l6wzraswotmhe6.py
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# q => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (48*x1) + (192*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wt/cwtwhu7iqo5rair5bnweuo5khmb7e5l2qv2pku3hann6ogfos7mk.py
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# v => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + (4*x2) + (48*x1) + (192*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (32 + x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/v4/cv4ht4ztzq6xell45o2uwdqpaatovskqg4idcdyplfbbfxu7r6h6.py
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# k => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + (4*x2) + (48*x1) + (192*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (16 + x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c5/cc5335fx2gusb6qgcjpudfhou76zahyma2ckrjw26lmkw2q3zxd3.py
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_prob => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zh/czh6tw7ngffcygnivwvcjex5edxy3ms4t27ymyn2hemxlpspxzq7.py
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_prob => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/my/cmyquxsupn5jxhfg2hje2gcqqg62qnzzhyxd7jixp5puholjjh5o.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (64*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ns/cnspfsjjmvserkfymbru7x5vm2xumtyor5javdiv74jr3avx67rq.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# output => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_11), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7u/c7uxwow3tztifyrr5oj6dotpbrh7qtup53xfydkt35y65ajtfwre.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# output => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_11), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_3, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_6), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (48, 4), (4, 1))
assert_size_stride(primals_3, (48, ), (1, ))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 48), (48, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 48), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf0, primals_3, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf0, primals_3, buf3, 256, grid=grid(256), stream=stream0)
del buf0
del primals_3
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf9, buf10, buf11, primals_5, primals_6, buf12, 64, grid=grid(64), stream=stream0)
del buf10
del buf11
del primals_6
return (buf12, primals_1, primals_5, buf6, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((48, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.cuda
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask=None):
return self._forward(inp, attn_mask)
def _forward(self, inp, attn_mask=None):
residual = inp
if self.pre_lnorm:
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask, -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size(
0), inp.size(1), n_head * d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = residual + attn_out
else:
output = self.layer_norm(residual + attn_out)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_head': 4, 'd_model': 4, 'd_head': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.cuda
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
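# QKV split kernels: the fused (16, 48) projection stores q | k | v at column
# offsets 0 / 16 / 32; each clone kernel adds the matching bias slice and
# gathers one chunk into a head-major (batch, head, seq, d_head) buffer.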
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (32 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 48 * x1 + 192 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (16 + x0 + 4 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
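# Softmax pass 1: subtracts the row max for numerical stability and folds the
# attention scale (0.5 = 1 / sqrt(d_head) with d_head = 4) into the same
# kernel before the exponential.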
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
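# Softmax pass 2: normalizes each row of exponentials by its sum.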
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
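# Head-merge kernel: materializes the permute(1, 2, 0, 3).contiguous() from
# the source module so the per-head attention vectors can be fed to o_net as
# a single (16, 16) matrix.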
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
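# Fused residual + LayerNorm, pass 1: adds the projection output to the
# residual stream and computes the per-row mean and biased variance over the
# last dimension of size 4.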
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
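# Fused residual + LayerNorm, pass 2: normalizes with rsqrt(var + 1e-05) and
# applies the elementwise affine weight and bias.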
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
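# call() reproduces MultiHeadAttn._forward for the fixed (4, 4, 4) input: one
# fused QKV matmul via extern mm, per-head bmm attention with the two-pass
# softmax, the output projection, and the fused residual layer norm. The
# dropout layers do not appear in the compiled graph.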
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (48, 4), (4, 1))
assert_size_stride(primals_3, (48,), (1,))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 48), (48, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 48), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](buf0, primals_3, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](buf0, primals_3, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf0, primals_3, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 4), (16,
4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf9,
buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_6
return buf12, primals_1, primals_5, buf6, reinterpret_tensor(buf8, (16,
16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf2, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0)
class MultiHeadAttnNew(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttnNew, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def _forward(self, inp, attn_mask=None):
residual = inp
if self.pre_lnorm:
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask, -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size(
0), inp.size(1), n_head * d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = residual + attn_out
else:
output = self.layer_norm(residual + attn_out)
return output
def forward(self, input_0):
primals_2 = self.qkv_net.weight
primals_3 = self.qkv_net.bias
primals_4 = self.o_net.weight
primals_5 = self.layer_norm.weight
primals_6 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
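# Hedged smoke-test sketch (not part of the original entry): it assumes a CUDA
# device is available, since call() pins all buffers to cuda:0, and runs one
# forward pass with an input shaped like get_inputs() in the eager reference.
if torch.cuda.is_available():
    _m = MultiHeadAttnNew(n_head=4, d_model=4, d_head=4, dropout=0.5).cuda()
    _y = _m(torch.rand(4, 4, 4, device='cuda'))
    print(_y.shape)  # expected: torch.Size([4, 4, 4])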
| carolmanderson/NeMo | MultiHeadAttn | false | 6,418 | ["Apache-2.0"] | 1 | be7114e2d983af751e1af4119465c626682747b7 | https://github.com/carolmanderson/NeMo/tree/be7114e2d983af751e1af4119465c626682747b7 | import torch
import torch.cuda
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
class Model(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super().__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask=None):
return self._forward(inp, attn_mask)
def _forward(self, inp, attn_mask=None):
residual = inp
if self.pre_lnorm:
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask, -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(inp.size(
0), inp.size(1), n_head * d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = residual + attn_out
else:
output = self.layer_norm(residual + attn_out)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 0.5]
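# Minimal eager usage sketch (an illustrative assumption, not from the source
# repository): builds the Model with the arguments above and runs it once in
# eval mode so the two Dropout layers are inactive.
if __name__ == '__main__':
    model = Model(*get_init_inputs()).eval()
    (inp,) = get_inputs()
    with torch.no_grad():
        out = model(inp)
    print(out.shape)  # expected: torch.Size([4, 4, 4])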
|
FeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ih/cih5ihitkvge5mvg3spw6mfpygiyr5tuqj5xls6wqbhrkjta3jqd.py
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# leaky_relu => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2048, 4), (4, 1))
assert_size_stride(primals_2, (2048, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2048), (2048, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 131072, grid=grid(131072), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 2048), (2048, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class FeedForward(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, x):
x = self.dropout(F.leaky_relu(self.linear_1(x)))
x = self.linear_2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4}]
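# Equivalence sketch for the fused kernel above (an assumption based on reading
# triton_poi_fused_leaky_relu_0): per element it stores the positive mask and a
# LeakyReLU with negative_slope=0.01, which matches eager PyTorch.
_x = torch.randn(64, 2048)
_mask = _x > 0.0                         # what the kernel writes to out_ptr0
_y = torch.where(_mask, _x, 0.01 * _x)   # what the kernel writes to out_ptr1
assert torch.allclose(_y, F.leaky_relu(_x, negative_slope=0.01))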
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
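# Kernel note (added): fuses the first Linear's bias-add with LeakyReLU, and
# also stores the (x > 0) mask buffer that the backward pass reuses.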
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2048, 4), (4, 1))
assert_size_stride(primals_2, (2048,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2048), (2048, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(131072)](buf0, primals_2, buf1,
buf2, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2048),
(2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1,
2048), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 2048), (2048, 1), 0), primals_4
class FeedForwardNew(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, input_0):
primals_1 = self.linear_1.weight
primals_2 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| chandar-lab/CriticalGradientOptimization | FeedForward | false | 6,419 | ["MIT"] | 1 | 1af4b1df40489991289bb50bb69859a00b2c97c6 | https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class Model(nn.Module):
def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(emb_dim, ff_dim)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(ff_dim, emb_dim)
def forward(self, x):
x = self.dropout(F.leaky_relu(self.linear_1(x)))
x = self.linear_2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
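# Illustrative usage sketch (assumed, not from the source repo): emb_dim=4 per
# get_init_inputs(), with the hidden feed-forward width left at its default
# of 2048.
if __name__ == '__main__':
    ff = Model(*get_init_inputs()).eval()
    y = ff(*get_inputs())
    print(y.shape)  # expected: torch.Size([4, 4, 4, 4])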
|
RNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# combined => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ul/culvxc5xcnacfjypzxghwcyc2445sqsz25ci4rib6axjxs3fv3so.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_1 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yr/cyr6fatjcqc5np3quy6arljtkkff4qjmueyb5b4pk5xvkxgrzuvd.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_1 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
del buf3
return (buf4, buf1, buf0, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input_tensor, hidden_tensor):
combined = torch.cat((input_tensor, hidden_tensor), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
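# Sketch of the two-kernel log-softmax decomposition above (illustrative
# assumption): kernel 1 subtracts the row max for stability, kernel 2 subtracts
# the log-sum-exp; composed, they equal torch.log_softmax.
_z = torch.randn(4, 4)
_shifted = _z - _z.max(dim=1, keepdim=True).values                 # kernel 1
_logsm = _shifted - _shifted.exp().sum(dim=1, keepdim=True).log()  # kernel 2
assert torch.allclose(_logsm, torch.log_softmax(_z, dim=1))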
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
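# Kernel note (added): materializes torch.cat((input, hidden), dim=1) for the
# (4, 4) + (4, 4) -> (4, 8) case, so both addmm calls can share one buffer.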
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__log_softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
return buf4, buf1, buf0, buf4
class RNNNew(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNNNew, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
def forward(self, input_0, input_1):
primals_3 = self.i2h.weight
primals_4 = self.i2h.bias
primals_5 = self.i2o.weight
primals_6 = self.i2o.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| chauhankartik/DeepLearning-EarlySteps | RNN | false | 6,420 | ["MIT"] | 1 | 44b0189cf6e81f8032a6a80cc33ff80496ebd462 | https://github.com/chauhankartik/DeepLearning-EarlySteps/tree/44b0189cf6e81f8032a6a80cc33ff80496ebd462 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input_tensor, hidden_tensor):
combined = torch.cat((input_tensor, hidden_tensor), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def init_hidden(self):
return torch.zeros(1, self.hidden_size)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
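# Single-step usage sketch (an assumption; the source only defines the module):
# feeds one batch of inputs plus a hidden state and inspects both outputs.
if __name__ == '__main__':
    rnn = Model(*get_init_inputs())
    x, h = get_inputs()
    out, h_next = rnn(x, h)
    print(out.shape, h_next.shape)  # expected: torch.Size([4, 4]) for both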
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rh/crhy6nilvaajphuuoyup37xl4ncuiyrcb3fnt5aboux6wyvcg7ie.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/63/c632cmks4wvpc5reioh6m2gkro2u67npny6lvpk455szn37idshj.py
# Topologically Sorted Source Nodes: [scores, softscores], Original ATen: [aten.div, aten._softmax]
# Source node to ATen node mapping:
# scores => div
# softscores => div_1, exp, sum_1
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_div_1 = async_compile.triton('triton_per_fused__softmax_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_div_1(in_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + (16*x0)), tmp2, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mz/cmzlu2lip25blpsdqeby7ek5757op6xw3pdkxbdediou5szw32tx.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores, softscores], Original ATen: [aten.div, aten._softmax]
triton_per_fused__softmax_div_1.run(buf5, buf6, buf9, 256, 16, grid=grid(256), stream=stream0)
del buf5
buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf10, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf11, buf12, 64, 4, grid=grid(64, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_11
return (reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0), buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class MultiHeadAttention(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
None
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
None
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
None
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, q, k, v, mask=None, explain=False):
"""
inputs:
q has shape (batch size, q_sequence length, embedding dimensions)
k,v have shape (batch size, kv_sequence length, embedding dimensions)
mask of shape (batch size, 1, kv_sequence length)
explain: boolean, prints intermediate values if True
outputs: sequence of vectors, re-represented using attention
shape (batch size, q_sequence length, embedding dimensions)
use:
The encoder layer places the same source vector sequence into q,k,v
and mask into mask.
The decoder layer uses this twice, once with decoder inputs as q,k,v
and target mask as mask. then with decoder inputs as q, encoder outputs
as k, v and source mask as mask
"""
batch_size = q.size(0)
q = self.q_linear(q)
k = self.k_linear(k)
v = self.v_linear(v)
if explain:
None
k = k.view(batch_size, -1, self.num_heads, self.dim_k)
q = q.view(batch_size, -1, self.num_heads, self.dim_k)
v = v.view(batch_size, -1, self.num_heads, self.dim_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
if explain:
None
attn, scores = self.attention(q, k, v, self.dim_k, mask, self.
dropout, explain)
if explain:
None
concat = attn.transpose(1, 2).contiguous().view(batch_size, -1,
self.dim_k * self.num_heads)
if explain:
None
output = self.out(concat)
if explain:
None
return output, scores
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_heads': 4, 'emb_dim': 4}]
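# Plain-PyTorch sketch of the attention() math above with mask=None and
# dropout=None (illustrative; the shapes are arbitrary assumptions):
# softmax(q @ k^T / sqrt(dim_k)) @ v.
_q = _k = _v = torch.randn(2, 3, 5, 8)  # (batch, heads, seq, dim_k)
_scores = torch.matmul(_q, _k.transpose(-2, -1)) / math.sqrt(_q.size(-1))
_out = torch.matmul(F.softmax(_scores, dim=-1), _v)
print(_out.shape)  # torch.Size([2, 3, 5, 8])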
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
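# Kernel note (added): triton_poi_fused_clone_0 fuses the q/k/v bias-add with
# the head-split transpose that lays each projection out for the batched matmul.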
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused__softmax_div_1(in_ptr0, out_ptr2, out_ptr3, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + 16 * x0), tmp2, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp14, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused__softmax_div_1[grid(256)](buf5, buf6, buf9, 256,
16, XBLOCK=128, num_warps=8, num_stages=1)
del buf5
buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf10, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0),
out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_11
return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0
), buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
            pass
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
            pass
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
                pass
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, input_0, input_1, input_2):
primals_2 = self.q_linear.weight
primals_3 = self.q_linear.bias
primals_4 = self.k_linear.weight
primals_5 = self.k_linear.bias
primals_7 = self.v_linear.weight
primals_8 = self.v_linear.bias
primals_10 = self.out.weight
primals_11 = self.out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
| chandar-lab/CriticalGradientOptimization | MultiHeadAttention | false | 6421 | [
"MIT"
] | 1 | 1af4b1df40489991289bb50bb69859a00b2c97c6 | https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class Model(nn.Module):
def __init__(self, num_heads, emb_dim, dim_k=None, dropout=0.1):
super().__init__()
self.emb_dim = emb_dim
self.dim_k = dim_k if dim_k else emb_dim // num_heads
self.num_heads = num_heads
self.q_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.k_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.v_linear = nn.Linear(emb_dim, self.dim_k * num_heads)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(self.dim_k * num_heads, emb_dim)
def attention(self, q, k, v, dim_k, mask=None, dropout=None, explain=False
):
k = k.transpose(-2, -1)
if explain:
            pass
scores = torch.matmul(q, k) / math.sqrt(dim_k)
if explain:
            pass
if mask is not None:
mask = mask.unsqueeze(1)
if explain:
                pass
scores = scores.masked_fill(mask == 0, -1000000000.0)
softscores = F.softmax(scores, dim=-1)
if dropout is not None:
softscores = dropout(softscores)
output = torch.matmul(softscores, v)
return output, scores
def forward(self, q, k, v, mask=None, explain=False):
"""
inputs:
q has shape (batch size, q_sequence length, embedding dimensions)
k,v have shape (batch size, kv_sequence length, embedding dimensions)
mask of shape (batch size, 1, kv_sequence length)
explain: boolean, prints intermediate values if True
outputs: sequence of vectors, re-represented using attention
shape (batch size, q_sequence length, embedding dimensions)
use:
The encoder layer places the same source vector sequence into q,k,v
and mask into mask.
        The decoder layer uses this twice: once with decoder inputs as q, k, v
        and the target mask as mask, then with decoder inputs as q, encoder
        outputs as k, v and the source mask as mask.
"""
batch_size = q.size(0)
q = self.q_linear(q)
k = self.k_linear(k)
v = self.v_linear(v)
if explain:
            pass
k = k.view(batch_size, -1, self.num_heads, self.dim_k)
q = q.view(batch_size, -1, self.num_heads, self.dim_k)
v = v.view(batch_size, -1, self.num_heads, self.dim_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
if explain:
            pass
attn, scores = self.attention(q, k, v, self.dim_k, mask, self.
dropout, explain)
if explain:
            pass
concat = attn.transpose(1, 2).contiguous().view(batch_size, -1,
self.dim_k * self.num_heads)
if explain:
            pass
output = self.out(concat)
if explain:
            pass
return output, scores
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
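# A minimal usage sketch (an illustrative addition; the shapes and sizes here
# are assumptions, not part of the original repo). With emb_dim=8 and
# num_heads=2, dim_k defaults to 8 // 2 = 4 and the output keeps the input's
# (batch, seq, emb_dim) shape.
def _example_usage():
    torch.manual_seed(0)
    mha = Model(num_heads=2, emb_dim=8)
    q = k = v = torch.rand(3, 5, 8)      # (batch, seq, emb_dim)
    mask = torch.ones(3, 1, 5)           # (batch, 1, kv_seq); zeros are masked
    out, scores = mha(q, k, v, mask=mask)
    assert out.shape == (3, 5, 8)        # re-represented sequence
    assert scores.shape == (3, 2, 5, 5)  # raw per-head attention scores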
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bm/cbmd63mrouqmm2pha5x6evse3dkbpy5o4xnk5v7quflfkqfdvwck.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# a => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 5
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/du/cduzwoecrea6mh72pekoclxecydqgccgdn7gwa7nwfckevtwnrny.py
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# a_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4t/c4tinndbfifahpyer77rm3uc2bmrxd7ki263fv7g4fyely5r2ouh.py
# Topologically Sorted Source Nodes: [tanh, mul], Original ATen: [aten.tanh, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 4), kwargs = {})
triton_poi_fused_mul_tanh_2 = async_compile.triton('triton_poi_fused_mul_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (5, 4), (4, 1))
assert_size_stride(primals_2, (5, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (3, 5), (5, 1))
assert_size_stride(primals_5, (3, ), (1, ))
assert_size_stride(primals_6, (4, 3), (3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 320, grid=grid(320), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 5), (5, 1), 0), reinterpret_tensor(primals_4, (5, 3), (1, 5), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 3), (48, 12, 3, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.bool)
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 192, grid=grid(192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 3), (3, 1), 0), reinterpret_tensor(primals_6, (3, 4), (1, 3), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, mul], Original ATen: [aten.tanh, aten.mul]
triton_poi_fused_mul_tanh_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 5), (5, 1), 0), reinterpret_tensor(buf3, (64, 3), (3, 1), 0), buf4, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((3, 5), (5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 3), (3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 5)
self.l2 = nn.Linear(5, 3)
self.l3 = nn.Linear(3, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
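# Illustrative check (an addition, not from the CityLearn source): the final
# tanh keeps every action inside [-max_action, max_action], here [-4, 4].
def _example_usage():
    actor = Actor(state_dim=4, action_dim=4, max_action=4)
    actions = actor(torch.rand(4, 4, 4, 4))
    assert actions.shape == (4, 4, 4, 4)
    assert actions.abs().max() <= 4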
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
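    # Annotation added for readability (not emitted by Inductor): fuses the
    # bias add of the first linear layer with ReLU, updating the activations
    # in place and saving a boolean mask of the zeroed positions for the
    # backward (threshold_backward) pass.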
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
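    # Annotation added for readability (not emitted by Inductor): applies tanh
    # and scales by max_action, which Inductor baked in as the constant 4.0,
    # bounding every action to [-4, 4].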
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (5, 4), (4, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (3, 5), (5, 1))
assert_size_stride(primals_5, (3,), (1,))
assert_size_stride(primals_6, (4, 3), (3, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(320)](buf1,
primals_2, buf7, 320, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 5), (5, 1), 0),
reinterpret_tensor(primals_4, (5, 3), (1, 5), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 3), (48, 12, 3, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(192)](buf3,
primals_5, buf6, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 3), (
3, 1), 0), reinterpret_tensor(primals_6, (3, 4), (1, 3), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_2[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 5), (5, 1), 0), reinterpret_tensor(
buf3, (64, 3), (3, 1), 0), buf4, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(ActorNew, self).__init__()
self.l1 = nn.Linear(state_dim, 5)
self.l2 = nn.Linear(5, 3)
self.l3 = nn.Linear(3, action_dim)
self.max_action = max_action
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| chenbq1234/CityLearn | Actor | false | 6422 | [
"MIT"
] | 1 | baa162435954ecd58e7f4769a46fa9046f4d2cf6 | https://github.com/chenbq1234/CityLearn/tree/baa162435954ecd58e7f4769a46fa9046f4d2cf6 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super().__init__()
self.l1 = nn.Linear(state_dim, 5)
self.l2 = nn.Linear(5, 3)
self.l3 = nn.Linear(3, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
BayesConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/4t/c4t3fy3ocbidqwtb4ddu6p3sgw2edtz4tqkv2dyxlhyopmtkveig.py
# Topologically Sorted Source Nodes: [exp, pow_2], Original ATen: [aten.exp, aten.pow]
# Source node to ATen node mapping:
# exp => exp
# pow_2 => pow_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_4,), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%exp, 2), kwargs = {})
triton_poi_fused_exp_pow_0 = async_compile.triton('triton_poi_fused_exp_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/f7/cf7lrsouksewpooa256opaenbmp6jqkmniws5qzkodbgkpyhyu3l.py
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
# Source node to ATen node mapping:
# pow_1 => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
triton_poi_fused_pow_1 = async_compile.triton('triton_poi_fused_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/as/cas7bpvxghkeja2ktffdapzqbxbqwebgjti6m7ax2xj4rppupi2y.py
# Topologically Sorted Source Nodes: [conv1d_1, clamp, act_sigma, mul, add], Original ATen: [aten.convolution, aten.clamp, aten.sqrt, aten.mul, aten.add]
# Source node to ATen node mapping:
# act_sigma => sqrt
# add => add
# clamp => clamp_min
# conv1d_1 => convolution_1
# mul => mul
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %pow_2, %primals_2, [1], [4], [1], False, [0], 1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%squeeze_1, 1e-16), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %randn), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %mul), kwargs = {})
triton_poi_fused_add_clamp_convolution_mul_sqrt_2 = async_compile.triton('triton_poi_fused_add_clamp_convolution_mul_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_convolution_mul_sqrt_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_convolution_mul_sqrt_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 9)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x2), xmask)
tmp8 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = 1e-16
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp7 = libdevice.sqrt(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = tmp4 + tmp9
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
tl.store(in_out_ptr1 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [act_mu], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(4,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 9), (36, 9, 1))
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, pow_2], Original ATen: [aten.exp, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_pow_0.run(primals_4, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
triton_poi_fused_pow_1.run(primals_3, buf2, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4), (0, 4, 1), 0), buf1, stride=(1,), padding=(4,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 9), (36, 9, 1))
# Topologically Sorted Source Nodes: [epsilon], Original ATen: [aten.randn_like]
buf5 = torch.ops.aten.randn.default([4, 9], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf6 = buf5
del buf5
buf4 = buf3; del buf3 # reuse
buf7 = reinterpret_tensor(buf0, (4, 9), (9, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d_1, clamp, act_sigma, mul, add], Original ATen: [aten.convolution, aten.clamp, aten.sqrt, aten.mul, aten.add]
triton_poi_fused_add_clamp_convolution_mul_sqrt_2.run(buf4, buf7, primals_2, buf6, 36, grid=grid(36), stream=stream0)
del primals_2
return (buf7, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf1, reinterpret_tensor(buf2, (1, 4, 4), (16, 4, 1), 0), buf4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
    Calculates the Kullback-Leibler divergence KL(q || p) between two univariate Gaussians, summed over all elements
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
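# Worked check (an illustrative addition, not from the original module): for
# identical Gaussians the divergence vanishes, since
# 0.5 * (2*log(1) - 1 + 1 + 0) == 0. Note the formula computes KL(q || p).
def _kl_sanity_check():
    mu = torch.zeros(3)
    sig = torch.ones(3)
    assert torch.isclose(calculate_kl(mu, sig, mu, sig), torch.tensor(0.0))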
class BayesConv1d(nn.Module):
"""
This class implements a Bayesian 1-dimensional Convolutional layer.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, bias=True, log_sigma_prior=-5, mu_prior=-1):
"""
Initializes BayesConv1d layer.
Args:
in_channels: number of input channels
out_channels: number of output channels
kernel_size: size of the convolutional kernel
            stride: stride of the convolution
            padding: zero-padding added to both sides of the input
            dilation: spacing between the kernel points of the convolution
            bias: whether to add bias
            log_sigma_prior: the initial value of the log standard deviation of the weight distribution
            mu_prior: the mean of the prior over the weights, used in the KL term
"""
super(BayesConv1d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.w_mu = nn.Parameter(torch.Tensor(out_channels, in_channels,
kernel_size))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_channels,
in_channels, kernel_size))
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
if bias is True:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
        Performs a forward pass of the input. Uses the reparameterization trick proposed by Kingma et al.
in "Variational Dropout and the Local Reparameterization trick" to sample directly from the activations.
Args:
input: the input to be forwarded
"""
act_mu = F.conv1d(input, self.w_mu, self.bias, self.stride, self.
padding, self.dilation)
act_sigma = torch.sqrt(torch.clamp(F.conv1d(input ** 2, torch.exp(
self.w_log_sigma) ** 2, self.bias, self.stride, self.padding,
self.dilation), min=1e-16))
epsilon = torch.randn_like(act_mu)
return act_mu + act_sigma * epsilon
def kl(self):
"""
Returns the Kullback-Leibler divergence between the prior and the posterior of Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1, 'padding': 4, 'dilation': 1}]
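# Minimal usage sketch (illustrative; the sizes mirror get_init_inputs above).
# Because the layer samples activations through the local reparameterization
# trick, two forward passes on the same input almost surely differ.
def _example_usage():
    layer = BayesConv1d(in_channels=4, out_channels=4, kernel_size=4,
                        stride=1, padding=4, dilation=1)
    x = torch.rand(4, 4)  # (channels, length), as in get_inputs above
    assert not torch.equal(layer(x), layer(x))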
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_add_clamp_convolution_mul_sqrt_2(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
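    # Annotation added for readability (not emitted by Inductor): adds the
    # shared bias to both the mean and the variance convolutions (the eager
    # code passes self.bias to the variance conv as well), clamps the variance
    # at 1e-16, takes the square root, and forms mu + sigma * epsilon in one
    # fused pass.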
xnumel = 36
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 9
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x2, xmask)
tmp8 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = 1e-16
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp7 = libdevice.sqrt(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = tmp4 + tmp9
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(in_out_ptr1 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(4,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 9), (36, 9, 1))
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_pow_0[grid(64)](primals_4, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_pow_1[grid(16)](primals_3, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4
), (0, 4, 1), 0), buf1, stride=(1,), padding=(4,), dilation=(1,
), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 9), (36, 9, 1))
buf5 = torch.ops.aten.randn.default([4, 9], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf6 = buf5
del buf5
buf4 = buf3
del buf3
buf7 = reinterpret_tensor(buf0, (4, 9), (9, 1), 0)
del buf0
triton_poi_fused_add_clamp_convolution_mul_sqrt_2[grid(36)](buf4,
buf7, primals_2, buf6, 36, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return buf7, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4,
4), (16, 4, 1), 0), buf1, reinterpret_tensor(buf2, (1, 4, 4), (16,
4, 1), 0), buf4, buf6
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
    Calculates the Kullback-Leibler divergence KL(q || p) between two univariate Gaussians, summed over all elements
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
class BayesConv1dNew(nn.Module):
"""
This class implements a Bayesian 1-dimensional Convolutional layer.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, bias=True, log_sigma_prior=-5, mu_prior=-1):
"""
Initializes BayesConv1d layer.
Args:
in_channels: number of input channels
out_channels: number of output channels
kernel_size: size of the convolutional kernel
            stride: stride of the convolution
            padding: zero-padding added to both sides of the input
            dilation: spacing between the kernel points of the convolution
            bias: whether to add bias
            log_sigma_prior: the initial value of the log standard deviation of the weight distribution
            mu_prior: the mean of the prior over the weights, used in the KL term
"""
super(BayesConv1dNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.w_mu = nn.Parameter(torch.Tensor(out_channels, in_channels,
kernel_size))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_channels,
in_channels, kernel_size))
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
if bias is True:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def kl(self):
"""
Returns the Kullback-Leibler divergence between the prior and the posterior of Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def forward(self, input_0):
primals_1 = self.w_mu
primals_4 = self.w_log_sigma
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
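# Illustrative note (an addition, not part of the compiled module): output[0]
# above is the sampled activation tensor; the remaining entries of `output`
# are tensors the AOT-compiled graph saves for the backward pass. A minimal
# smoke test, assuming a CUDA device is available:
def _example_usage():
    layer = BayesConv1dNew(4, 4, 4, stride=1, padding=4, dilation=1).cuda()
    out = layer(torch.rand(4, 4, device='cuda'))
    assert out.shape == (4, 9)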
| chapmanbe/uncertainty | BayesConv1d | false | 6423 | [
"Apache-2.0"
] | 1 | d4eec00e937c76043d57a13ffcc9618b1e08d967 | https://github.com/chapmanbe/uncertainty/tree/d4eec00e937c76043d57a13ffcc9618b1e08d967 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
    Calculates the Kullback-Leibler divergence KL(q || p) between two univariate Gaussians, summed over all elements
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
class Model(nn.Module):
"""
This class implements a Bayesian 1-dimensional Convolutional layer.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, bias=True, log_sigma_prior=-5, mu_prior=-1):
"""
Initializes BayesConv1d layer.
Args:
in_channels: number of input channels
out_channels: number of output channels
kernel_size: size of the convolutional kernel
            stride: stride of the convolution
            padding: zero-padding added to both sides of the input
            dilation: spacing between the kernel points of the convolution
            bias: whether to add bias
            log_sigma_prior: the initial value of the log standard deviation of the weight distribution
            mu_prior: the mean of the prior over the weights, used in the KL term
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.w_mu = nn.Parameter(torch.Tensor(out_channels, in_channels,
kernel_size))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_channels,
in_channels, kernel_size))
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
if bias is True:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
        Performs a forward pass of the input. Uses the reparameterization trick proposed by Kingma et al.
in "Variational Dropout and the Local Reparameterization trick" to sample directly from the activations.
Args:
input: the input to be forwarded
"""
act_mu = F.conv1d(input, self.w_mu, self.bias, self.stride, self.
padding, self.dilation)
act_sigma = torch.sqrt(torch.clamp(F.conv1d(input ** 2, torch.exp(
self.w_log_sigma) ** 2, self.bias, self.stride, self.padding,
self.dilation), min=1e-16))
epsilon = torch.randn_like(act_mu)
return act_mu + act_sigma * epsilon
def kl(self):
"""
Returns the Kullback-Leibler divergence between the prior and the posterior of Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1, 'padding': 4, 'dilation': 1}]
|
BayesLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2q/c2qo3gh3y46foffuo4deaunecv5aknydoaksonjqz2rv3jg72afs.py
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
# Source node to ATen node mapping:
# pow_1 => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
triton_poi_fused_pow_0 = async_compile.triton('triton_poi_fused_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/u2/cu2bwgbqtjqj3753ql4tajeks6o4dwkkjzmvlm7utj2tgt7ldihs.py
# Topologically Sorted Source Nodes: [exp, pow_2], Original ATen: [aten.exp, aten.pow]
# Source node to ATen node mapping:
# exp => exp
# pow_2 => pow_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_4,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%exp, 2), kwargs = {})
triton_poi_fused_exp_pow_1 = async_compile.triton('triton_poi_fused_exp_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/23/c23hjwmn6dk5yetvs6i5u2ugpycc3ttuknrvp2vcu7zhbwozz7n2.py
# Topologically Sorted Source Nodes: [add, act_sigma, mul, add_1], Original ATen: [aten.add, aten.sqrt, aten.mul]
# Source node to ATen node mapping:
# act_sigma => sqrt
# add => add
# add_1 => add_1
# mul => mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %randn), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul), kwargs = {})
triton_poi_fused_add_mul_sqrt_2 = async_compile.triton('triton_poi_fused_add_mul_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sqrt_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sqrt_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp7 = tl.load(in_ptr2 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = 1e-08
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = tmp2 + tmp8
tl.store(in_out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_0.run(primals_3, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, pow_2], Original ATen: [aten.exp, aten.pow]
triton_poi_fused_exp_pow_1.run(primals_4, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), out=buf3)
del buf2
# Topologically Sorted Source Nodes: [epsilon], Original ATen: [aten.randn_like]
buf4 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, act_sigma, mul, add_1], Original ATen: [aten.add, aten.sqrt, aten.mul]
triton_poi_fused_add_mul_sqrt_2.run(buf6, primals_2, buf3, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf6, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
    Calculates the Kullback-Leibler divergence KL(q || p) between two univariate Gaussians (p and q)
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
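# A minimal sanity check (illustrative sketch, not part of the original repo):
# the closed form above is KL(q || p) for Gaussians, so it should agree with
# torch.distributions.kl_divergence(Normal(mu_q, sig_q), Normal(mu_p, sig_p))
# summed over elements.
def _check_calculate_kl():
    from torch.distributions import Normal, kl_divergence
    mu_p, sig_p = torch.zeros(3), torch.ones(3)
    mu_q, sig_q = torch.randn(3), torch.rand(3) + 0.5
    expected = kl_divergence(Normal(mu_q, sig_q), Normal(mu_p, sig_p)).sum()
    assert torch.allclose(calculate_kl(mu_p, sig_p, mu_q, sig_q), expected)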
class BayesLinear(nn.Module):
"""
    This class implements a Bayesian Linear layer, which places a distribution over its weights instead of point estimates.
"""
def __init__(self, in_features, out_features, bias=True,
log_sigma_prior=-5, mu_prior=-1):
"""
Initializes a BayesLinear layer.
Args:
in_features: number of input features
out_features: number of output features
bias: whether to add bias
            log_sigma_prior: the initial value of the log standard deviation of the distribution
mu_prior: the initial value of the mean of the distribution
"""
super(BayesLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.w_mu = nn.Parameter(torch.Tensor(out_features, in_features))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_features, in_features)
)
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
        if bias is True:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
        Performs a forward pass of the input. Uses the reparametrization trick proposed by Kingma et al.
        in "Variational Dropout and the Local Reparameterization Trick" to sample the activations directly.
Args:
input: the input to be forwarded
"""
act_mu = F.linear(input, self.w_mu, self.bias)
act_sigma = torch.sqrt(F.linear(input ** 2, torch.exp(self.
w_log_sigma) ** 2) + 1e-08)
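        # For a linear map with independent Gaussian weights, the
        # pre-activation is Gaussian with mean x @ w_mu^T and variance
        # x**2 @ sigma**2^T, so the activations can be sampled directly.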
epsilon = torch.randn_like(act_mu)
return act_mu + act_sigma * epsilon
def kl(self):
"""
        Returns the Kullback-Leibler divergence between the prior and the posterior of the Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
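# Hypothetical usage sketch (not from the original repo): a Bayesian layer is
# typically trained with an ELBO-style objective that adds a KL penalty to the
# task loss; `beta` below is an assumed KL weight.
def elbo_step_example():
    layer = BayesLinear(in_features=4, out_features=4)
    x, target = torch.rand(8, 4), torch.rand(8, 4)
    beta = 0.001
    pred = layer(x)  # stochastic forward via local reparameterization
    loss = F.mse_loss(pred, target) + beta * layer.kl()
    loss.backward()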
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
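    # squares the input elementwise; this feeds the variance matmul of the
    # local reparameterization (var = x**2 @ sigma**2^T)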
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_exp_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
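    # exp(w_log_sigma) squared is the per-weight variance sigma**2; note
    # that exp(x)**2 == exp(2 * x)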
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_sqrt_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
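    # in_out_ptr0 holds x @ w_mu^T (the bias in in_ptr0 is added below),
    # in_ptr1 is the activation variance and in_ptr2 the standard-normal
    # noise, so the store computes act_mu + sqrt(var + 1e-08) * epsilon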
tmp2 = tmp0 + tmp1
tmp4 = 1e-08
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp6 * tmp7
tmp9 = tmp2 + tmp8
tl.store(in_out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_0[grid(256)](primals_3, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_exp_pow_1[grid(16)](primals_4, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(buf2, (4, 4), (1, 4), 0), out=buf3)
del buf2
buf4 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mul_sqrt_2[grid(256)](buf6, primals_2, buf3,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf6, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, buf5
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
    Calculates the Kullback-Leibler divergence KL(q || p) between two univariate Gaussians (p and q)
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
class BayesLinearNew(nn.Module):
"""
    This class implements a Bayesian Linear layer, which places a distribution over its weights instead of point estimates.
"""
def __init__(self, in_features, out_features, bias=True,
log_sigma_prior=-5, mu_prior=-1):
"""
Initializes a BayesLinear layer.
Args:
in_features: number of input features
out_features: number of output features
bias: whether to add bias
            log_sigma_prior: the initial value of the log standard deviation of the distribution
mu_prior: the initial value of the mean of the distribution
"""
super(BayesLinearNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.w_mu = nn.Parameter(torch.Tensor(out_features, in_features))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_features, in_features)
)
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
        if bias is True:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def kl(self):
"""
        Returns the Kullback-Leibler divergence between the prior and the posterior of the Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def forward(self, input_0):
primals_1 = self.w_mu
primals_4 = self.w_log_sigma
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
 | chapmanbe/uncertainty | BayesLinear | false | 6,424 | ["Apache-2.0"] | 1 | d4eec00e937c76043d57a13ffcc9618b1e08d967 | https://github.com/chapmanbe/uncertainty/tree/d4eec00e937c76043d57a13ffcc9618b1e08d967 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
def calculate_kl(mu_p, sig_p, mu_q, sig_q):
"""
    Calculates the Kullback-Leibler divergence KL(q || p) between two univariate Gaussians (p and q)
Args:
mu_p: mean of the Gaussian p
sig_p: standard deviation of the Gaussian p
mu_q: mean of the Gaussian q
sig_q: standard deviation of the Gaussian q
"""
kl = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2) +
((mu_p - mu_q) / sig_p).pow(2)).sum()
return kl
class Model(nn.Module):
"""
    This class implements a Bayesian Linear layer, which places a distribution over its weights instead of point estimates.
"""
def __init__(self, in_features, out_features, bias=True,
log_sigma_prior=-5, mu_prior=-1):
"""
Initializes a BayesLinear layer.
Args:
in_features: number of input features
out_features: number of output features
bias: whether to add bias
            log_sigma_prior: the initial value of the log standard deviation of the distribution
mu_prior: the initial value of the mean of the distribution
"""
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.w_mu = nn.Parameter(torch.Tensor(out_features, in_features))
self.w_log_sigma = nn.Parameter(torch.Tensor(out_features, in_features)
)
self.mu_prior_init = mu_prior
self.log_sigma_prior_init = log_sigma_prior
        if bias is True:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the layer
"""
init.kaiming_uniform_(self.w_mu, a=math.sqrt(5))
init.uniform_(self.w_log_sigma, self.log_sigma_prior_init - 0.1,
self.log_sigma_prior_init)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.w_mu)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""
        Performs a forward pass of the input. Uses the reparametrization trick proposed by Kingma et al.
        in "Variational Dropout and the Local Reparameterization Trick" to sample the activations directly.
Args:
input: the input to be forwarded
"""
act_mu = F.linear(input, self.w_mu, self.bias)
act_sigma = torch.sqrt(F.linear(input ** 2, torch.exp(self.
w_log_sigma) ** 2) + 1e-08)
epsilon = torch.randn_like(act_mu)
return act_mu + act_sigma * epsilon
def kl(self):
"""
        Returns the Kullback-Leibler divergence between the prior and the posterior of the Bayesian layer.
"""
return calculate_kl(torch.Tensor([self.mu_prior_init]).type_as(self
.w_mu), torch.exp(torch.Tensor([self.log_sigma_prior_init]).
type_as(self.w_mu)), self.w_mu, torch.exp(self.w_log_sigma))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PositionwiseFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/df/cdfcie57v6pcdd6oeaz4mvlgksxgyuxzmlv5bklwemyulqhtcxta.py
# Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# std => sqrt, var
# sub => sub
# truediv => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rg/crgv7fpu5yp7bran3e6f2xvynemba7inixjeyvqbl75yk7rlqbnp.py
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_2 => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf5, 256, grid=grid(256), stream=stream0)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf4, primals_7, primals_1, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf4, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Layer Normalization class
"""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(features))
self.bias = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.weight * (x - mean) / (std + self.eps) + self.bias
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
        d_model (int): the size of the input to the first layer of the FFN.
        d_ff (int): the hidden layer size of the second layer
            of the FFN.
        dropout (float): dropout probability (0-1.0).
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.intermediate = nn.Linear(d_model, d_ff)
self.output = nn.Linear(d_ff, d_model)
self.layer_norm = LayerNorm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
"""
Layer definition.
Args:
input: [ batch_size, input_len, model_dim ]
Returns:
output: [ batch_size, input_len, model_dim ]
"""
inter = self.dropout_1(self.relu(self.intermediate(self.layer_norm(x)))
)
output = self.dropout_2(self.output(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
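# Quick shape check (illustrative sketch, not part of the original repo): the
# block is residual, so the output shape matches the input shape whenever the
# trailing dimension equals d_model.
def ffn_shape_example():
    ffn = PositionwiseFeedForward(d_model=4, d_ff=16, dropout=0.0)
    x = torch.rand(2, 5, 4)  # [batch_size, input_len, model_dim]
    assert ffn(x).shape == x.shape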
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
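    # Fused LayerNorm over the last dim (size 4): tmp10 is the mean, tmp25
    # the Bessel-corrected variance (sum of squares / 3.0, matching
    # torch.std), and the store writes weight * (x - mean) / (std + 1e-06) + bias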
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
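    # tmp4 is relu(x + bias); tmp6 records where the relu clamped to zero,
    # the mask reused by autograd as the threshold_backward input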
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf2,
primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_2[grid(256)](buf4, primals_7, primals_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf4, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), primals_6, buf5, primals_4
class LayerNorm(nn.Module):
"""
Layer Normalization class
"""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(features))
self.bias = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.weight * (x - mean) / (std + self.eps) + self.bias
class PositionwiseFeedForwardNew(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
        d_model (int): the size of the input to the first layer of the FFN.
        d_ff (int): the hidden layer size of the second layer
            of the FFN.
        dropout (float): dropout probability (0-1.0).
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.intermediate = nn.Linear(d_model, d_ff)
self.output = nn.Linear(d_ff, d_model)
self.layer_norm = LayerNorm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0):
        primals_2 = self.layer_norm.weight
        primals_3 = self.layer_norm.bias
        primals_4 = self.intermediate.weight
        primals_5 = self.intermediate.bias
        primals_6 = self.output.weight
        primals_7 = self.output.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
 | chengjunyan1/Graph-Sparse-Transformer | PositionwiseFeedForward | false | 6,425 | ["Apache-2.0"] | 1 | 2c3b77f81789ca80e0c30c32f0c702b2d3bac048 | https://github.com/chengjunyan1/Graph-Sparse-Transformer/tree/2c3b77f81789ca80e0c30c32f0c702b2d3bac048 | import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Layer Normalization class
"""
def __init__(self, features, eps=1e-06):
super().__init__()
self.weight = nn.Parameter(torch.ones(features))
self.bias = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.weight * (x - mean) / (std + self.eps) + self.bias
class Model(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
        d_model (int): the size of the input to the first layer of the FFN.
        d_ff (int): the hidden layer size of the second layer
            of the FFN.
        dropout (float): dropout probability (0-1.0).
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super().__init__()
self.intermediate = nn.Linear(d_model, d_ff)
self.output = nn.Linear(d_ff, d_model)
self.layer_norm = LayerNorm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.relu = nn.ReLU()
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
"""
Layer definition.
Args:
input: [ batch_size, input_len, model_dim ]
Returns:
output: [ batch_size, input_len, model_dim ]
"""
inter = self.dropout_1(self.relu(self.intermediate(self.layer_norm(x)))
)
output = self.dropout_2(self.output(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [sa], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# sa => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k2/ck2x2iaaotzlus7wcdhs2lify3zhpjoekcoeniy7pue6js4n64dv.py
# Topologically Sorted Source Nodes: [q1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# q1 => relu
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 7
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kn/cknjxowt463dsd4xxvk2n73q5qfxw6raumefipfoytafdx4mtxnp.py
# Topologically Sorted Source Nodes: [q1_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# q1_1 => relu_1
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_6), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (7, 8), (8, 1))
assert_size_stride(primals_4, (7, ), (1, ))
assert_size_stride(primals_5, (6, 7), (7, 1))
assert_size_stride(primals_6, (6, ), (1, ))
assert_size_stride(primals_7, (1, 6), (6, 1))
assert_size_stride(primals_8, (1, ), (1, ))
assert_size_stride(primals_9, (7, 8), (8, 1))
assert_size_stride(primals_10, (7, ), (1, ))
assert_size_stride(primals_11, (6, 7), (7, 1))
assert_size_stride(primals_12, (6, ), (1, ))
assert_size_stride(primals_13, (1, 6), (6, 1))
assert_size_stride(primals_14, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [sa], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 7), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [q1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 28, grid=grid(28), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (7, 6), (1, 7), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [q1_1], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf4, primals_6, 24, grid=grid(24), stream=stream0)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [q1_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (6, 1), (1, 6), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 7), (1, 8), 0), out=buf7)
del primals_9
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [q2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf8, primals_10, 28, grid=grid(28), stream=stream0)
del primals_10
buf9 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (7, 6), (1, 7), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [q2_1], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf10, primals_12, 24, grid=grid(24), stream=stream0)
del primals_12
buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [q2_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(primals_13, (6, 1), (1, 6), 0), alpha=1, beta=1, out=buf12)
del primals_14
return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13, primals_11, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((7, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((7, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((6, 7), (7, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 6), (6, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((7, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((7, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((6, 7), (7, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, 6), (6, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 7)
self.l2 = nn.Linear(7, 6)
self.l3 = nn.Linear(6, 1)
self.l4 = nn.Linear(state_dim + action_dim, 7)
self.l5 = nn.Linear(7, 6)
self.l6 = nn.Linear(6, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
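# Hypothetical usage sketch (TD3-style, not from the original repo): the twin
# heads support clipped double-Q targets, while Q1 alone drives the actor loss.
def twin_q_example():
    critic = Critic(state_dim=4, action_dim=4)
    state, action = torch.rand(4, 4), torch.rand(4, 4)
    q1, q2 = critic(state, action)
    target = torch.min(q1, q2)  # pessimistic value estimate
    actor_objective = critic.Q1(state, action).mean()
    return target, actor_objective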
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
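    # concatenates state (columns 0-3) and action (columns 4-7) along dim 1;
    # tmp4 below selects which input each output column reads from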
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 7
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
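    # fused bias-add + relu for a 7-unit hidden layer (xnumel = 4 * 7); the
    # same kernel serves the first layer of both Q-heads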
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (7, 8), (8, 1))
assert_size_stride(primals_4, (7,), (1,))
assert_size_stride(primals_5, (6, 7), (7, 1))
assert_size_stride(primals_6, (6,), (1,))
assert_size_stride(primals_7, (1, 6), (6, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (7, 8), (8, 1))
assert_size_stride(primals_10, (7,), (1,))
assert_size_stride(primals_11, (6, 7), (7, 1))
assert_size_stride(primals_12, (6,), (1,))
assert_size_stride(primals_13, (1, 6), (6, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 7), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(28)](buf2, primals_4, 28, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (7, 6), (1, 7
), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_2[grid(24)](buf4, primals_6, 24, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(6, 1), (1, 6), 0), alpha=1, beta=1, out=buf6)
del primals_8
buf7 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_9, (8, 7), (1, 8
), 0), out=buf7)
del primals_9
buf8 = buf7
del buf7
triton_poi_fused_relu_1[grid(28)](buf8, primals_10, 28, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_10
buf9 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_11, (7, 6), (1,
7), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_2[grid(24)](buf10, primals_12, 24, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_14, buf10, reinterpret_tensor(
primals_13, (6, 1), (1, 6), 0), alpha=1, beta=1, out=buf12)
del primals_14
return (buf6, buf12, buf0, buf2, buf4, buf8, buf10, primals_13,
primals_11, primals_7, primals_5)
class CriticNew(nn.Module):
def __init__(self, state_dim, action_dim):
super(CriticNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 7)
self.l2 = nn.Linear(7, 6)
self.l3 = nn.Linear(6, 1)
self.l4 = nn.Linear(state_dim + action_dim, 7)
self.l5 = nn.Linear(7, 6)
self.l6 = nn.Linear(6, 1)
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.l2.weight
primals_6 = self.l2.bias
primals_7 = self.l3.weight
primals_8 = self.l3.bias
primals_9 = self.l4.weight
primals_10 = self.l4.bias
primals_11 = self.l5.weight
primals_12 = self.l5.bias
primals_13 = self.l6.weight
primals_14 = self.l6.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0], output[1]
 | chenbq1234/CityLearn | Critic | false | 6,426 | ["MIT"] | 1 | baa162435954ecd58e7f4769a46fa9046f4d2cf6 | https://github.com/chenbq1234/CityLearn/tree/baa162435954ecd58e7f4769a46fa9046f4d2cf6 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
self.l1 = nn.Linear(state_dim + action_dim, 7)
self.l2 = nn.Linear(7, 6)
self.l3 = nn.Linear(6, 1)
self.l4 = nn.Linear(state_dim + action_dim, 7)
self.l5 = nn.Linear(7, 6)
self.l6 = nn.Linear(6, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|